From 29a836fd347e8cf56797eb937c50c90f18618d3d Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Feb 2025 12:02:51 +0000 Subject: [PATCH 1/5] add preview clients --- .gitattributes | 80 + apps/v2preview/api.go | 211 + apps/v2preview/client.go | 45 + apps/v2preview/impl.go | 237 ++ apps/v2preview/model.go | 750 ++++ billing/v2preview/api.go | 466 +++ billing/v2preview/client.go | 160 + billing/v2preview/impl.go | 318 ++ billing/v2preview/model.go | 1196 ++++++ catalog/v2preview/api.go | 3409 +++++++++++++++++ catalog/v2preview/client.go | 815 ++++ catalog/v2preview/impl.go | 2111 ++++++++++ catalog/v2preview/model.go | 6163 ++++++++++++++++++++++++++++++ cleanrooms/v2preview/api.go | 257 ++ cleanrooms/v2preview/client.go | 113 + cleanrooms/v2preview/impl.go | 253 ++ cleanrooms/v2preview/model.go | 1329 +++++++ compute/v2preview/api.go | 1501 ++++++++ compute/v2preview/client.go | 317 ++ compute/v2preview/impl.go | 1070 ++++++ compute/v2preview/model.go | 5492 ++++++++++++++++++++++++++ dashboards/v2preview/api.go | 412 ++ dashboards/v2preview/client.go | 147 + dashboards/v2preview/impl.go | 413 ++ dashboards/v2preview/model.go | 1641 ++++++++ files/v2preview/api.go | 479 +++ files/v2preview/client.go | 79 + files/v2preview/impl.go | 295 ++ files/v2preview/model.go | 388 ++ iam/v2preview/api.go | 1392 +++++++ iam/v2preview/client.go | 433 +++ iam/v2preview/impl.go | 962 +++++ iam/v2preview/model.go | 1513 ++++++++ jobs/v2/model.go | 869 ++++- jobs/v2preview/api.go | 485 +++ jobs/v2preview/client.go | 79 + jobs/v2preview/impl.go | 374 ++ jobs/v2preview/model.go | 6086 +++++++++++++++++++++++++++++ marketplace/v2preview/api.go | 1528 ++++++++ marketplace/v2preview/client.go | 419 ++ marketplace/v2preview/impl.go | 1150 ++++++ marketplace/v2preview/model.go | 2074 ++++++++++ ml/v2preview/api.go | 678 ++++ ml/v2preview/client.go | 79 + ml/v2preview/impl.go | 1113 ++++++ ml/v2preview/model.go | 3074 +++++++++++++++ oauth2/v2preview/api.go | 541 +++ oauth2/v2preview/client.go | 190 + oauth2/v2preview/impl.go | 507 +++ oauth2/v2preview/model.go | 708 ++++ pipelines/v2preview/api.go | 304 ++ pipelines/v2preview/client.go | 45 + pipelines/v2preview/impl.go | 233 ++ pipelines/v2preview/model.go | 2321 +++++++++++ provisioning/v2preview/api.go | 1385 +++++++ provisioning/v2preview/client.go | 220 ++ provisioning/v2preview/impl.go | 355 ++ provisioning/v2preview/model.go | 1365 +++++++ serving/v2preview/api.go | 306 ++ serving/v2preview/client.go | 113 + serving/v2preview/impl.go | 253 ++ serving/v2preview/model.go | 2153 +++++++++++ settings/v2preview/api.go | 1635 ++++++++ settings/v2preview/client.go | 859 +++++ settings/v2preview/impl.go | 1150 ++++++ settings/v2preview/model.go | 2984 +++++++++++++++ sharing/v2/model.go | 136 +- sharing/v2preview/api.go | 532 +++ sharing/v2preview/client.go | 147 + sharing/v2preview/impl.go | 424 ++ sharing/v2preview/model.go | 1305 +++++++ sql/v2preview/api.go | 1842 +++++++++ sql/v2preview/client.go | 487 +++ sql/v2preview/impl.go | 952 +++++ sql/v2preview/model.go | 4823 +++++++++++++++++++++++ vectorsearch/v2preview/api.go | 182 + vectorsearch/v2preview/client.go | 79 + vectorsearch/v2preview/impl.go | 231 ++ vectorsearch/v2preview/model.go | 898 +++++ workspace/v2preview/api.go | 931 +++++ workspace/v2preview/client.go | 147 + workspace/v2preview/impl.go | 614 +++ workspace/v2preview/model.go | 1301 +++++++ 83 files changed, 85107 insertions(+), 6 deletions(-) create mode 100755 apps/v2preview/api.go create mode 100755 apps/v2preview/client.go create 
mode 100755 apps/v2preview/impl.go create mode 100755 apps/v2preview/model.go create mode 100755 billing/v2preview/api.go create mode 100755 billing/v2preview/client.go create mode 100755 billing/v2preview/impl.go create mode 100755 billing/v2preview/model.go create mode 100755 catalog/v2preview/api.go create mode 100755 catalog/v2preview/client.go create mode 100755 catalog/v2preview/impl.go create mode 100755 catalog/v2preview/model.go create mode 100755 cleanrooms/v2preview/api.go create mode 100755 cleanrooms/v2preview/client.go create mode 100755 cleanrooms/v2preview/impl.go create mode 100755 cleanrooms/v2preview/model.go create mode 100755 compute/v2preview/api.go create mode 100755 compute/v2preview/client.go create mode 100755 compute/v2preview/impl.go create mode 100755 compute/v2preview/model.go create mode 100755 dashboards/v2preview/api.go create mode 100755 dashboards/v2preview/client.go create mode 100755 dashboards/v2preview/impl.go create mode 100755 dashboards/v2preview/model.go create mode 100755 files/v2preview/api.go create mode 100755 files/v2preview/client.go create mode 100755 files/v2preview/impl.go create mode 100755 files/v2preview/model.go create mode 100755 iam/v2preview/api.go create mode 100755 iam/v2preview/client.go create mode 100755 iam/v2preview/impl.go create mode 100755 iam/v2preview/model.go create mode 100755 jobs/v2preview/api.go create mode 100755 jobs/v2preview/client.go create mode 100755 jobs/v2preview/impl.go create mode 100755 jobs/v2preview/model.go create mode 100755 marketplace/v2preview/api.go create mode 100755 marketplace/v2preview/client.go create mode 100755 marketplace/v2preview/impl.go create mode 100755 marketplace/v2preview/model.go create mode 100755 ml/v2preview/api.go create mode 100755 ml/v2preview/client.go create mode 100755 ml/v2preview/impl.go create mode 100755 ml/v2preview/model.go create mode 100755 oauth2/v2preview/api.go create mode 100755 oauth2/v2preview/client.go create mode 100755 oauth2/v2preview/impl.go create mode 100755 oauth2/v2preview/model.go create mode 100755 pipelines/v2preview/api.go create mode 100755 pipelines/v2preview/client.go create mode 100755 pipelines/v2preview/impl.go create mode 100755 pipelines/v2preview/model.go create mode 100755 provisioning/v2preview/api.go create mode 100755 provisioning/v2preview/client.go create mode 100755 provisioning/v2preview/impl.go create mode 100755 provisioning/v2preview/model.go create mode 100755 serving/v2preview/api.go create mode 100755 serving/v2preview/client.go create mode 100755 serving/v2preview/impl.go create mode 100755 serving/v2preview/model.go create mode 100755 settings/v2preview/api.go create mode 100755 settings/v2preview/client.go create mode 100755 settings/v2preview/impl.go create mode 100755 settings/v2preview/model.go create mode 100755 sharing/v2preview/api.go create mode 100755 sharing/v2preview/client.go create mode 100755 sharing/v2preview/impl.go create mode 100755 sharing/v2preview/model.go create mode 100755 sql/v2preview/api.go create mode 100755 sql/v2preview/client.go create mode 100755 sql/v2preview/impl.go create mode 100755 sql/v2preview/model.go create mode 100755 vectorsearch/v2preview/api.go create mode 100755 vectorsearch/v2preview/client.go create mode 100755 vectorsearch/v2preview/impl.go create mode 100755 vectorsearch/v2preview/model.go create mode 100755 workspace/v2preview/api.go create mode 100755 workspace/v2preview/client.go create mode 100755 workspace/v2preview/impl.go create mode 100755 
workspace/v2preview/model.go diff --git a/.gitattributes b/.gitattributes index f865d7e0e..49638ea32 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,79 +2,159 @@ apps/v2/api.go linguist-generated=true apps/v2/client.go linguist-generated=true apps/v2/impl.go linguist-generated=true apps/v2/model.go linguist-generated=true +apps/v2preview/api.go linguist-generated=true +apps/v2preview/client.go linguist-generated=true +apps/v2preview/impl.go linguist-generated=true +apps/v2preview/model.go linguist-generated=true billing/v2/api.go linguist-generated=true billing/v2/client.go linguist-generated=true billing/v2/impl.go linguist-generated=true billing/v2/model.go linguist-generated=true +billing/v2preview/api.go linguist-generated=true +billing/v2preview/client.go linguist-generated=true +billing/v2preview/impl.go linguist-generated=true +billing/v2preview/model.go linguist-generated=true catalog/v2/api.go linguist-generated=true catalog/v2/client.go linguist-generated=true catalog/v2/impl.go linguist-generated=true catalog/v2/model.go linguist-generated=true +catalog/v2preview/api.go linguist-generated=true +catalog/v2preview/client.go linguist-generated=true +catalog/v2preview/impl.go linguist-generated=true +catalog/v2preview/model.go linguist-generated=true cleanrooms/v2/api.go linguist-generated=true cleanrooms/v2/client.go linguist-generated=true cleanrooms/v2/impl.go linguist-generated=true cleanrooms/v2/model.go linguist-generated=true +cleanrooms/v2preview/api.go linguist-generated=true +cleanrooms/v2preview/client.go linguist-generated=true +cleanrooms/v2preview/impl.go linguist-generated=true +cleanrooms/v2preview/model.go linguist-generated=true compute/v2/api.go linguist-generated=true compute/v2/client.go linguist-generated=true compute/v2/impl.go linguist-generated=true compute/v2/model.go linguist-generated=true +compute/v2preview/api.go linguist-generated=true +compute/v2preview/client.go linguist-generated=true +compute/v2preview/impl.go linguist-generated=true +compute/v2preview/model.go linguist-generated=true dashboards/v2/api.go linguist-generated=true dashboards/v2/client.go linguist-generated=true dashboards/v2/impl.go linguist-generated=true dashboards/v2/model.go linguist-generated=true +dashboards/v2preview/api.go linguist-generated=true +dashboards/v2preview/client.go linguist-generated=true +dashboards/v2preview/impl.go linguist-generated=true +dashboards/v2preview/model.go linguist-generated=true files/v2/api.go linguist-generated=true files/v2/client.go linguist-generated=true files/v2/impl.go linguist-generated=true files/v2/model.go linguist-generated=true +files/v2preview/api.go linguist-generated=true +files/v2preview/client.go linguist-generated=true +files/v2preview/impl.go linguist-generated=true +files/v2preview/model.go linguist-generated=true iam/v2/api.go linguist-generated=true iam/v2/client.go linguist-generated=true iam/v2/impl.go linguist-generated=true iam/v2/model.go linguist-generated=true +iam/v2preview/api.go linguist-generated=true +iam/v2preview/client.go linguist-generated=true +iam/v2preview/impl.go linguist-generated=true +iam/v2preview/model.go linguist-generated=true jobs/v2/api.go linguist-generated=true jobs/v2/client.go linguist-generated=true jobs/v2/impl.go linguist-generated=true jobs/v2/model.go linguist-generated=true +jobs/v2preview/api.go linguist-generated=true +jobs/v2preview/client.go linguist-generated=true +jobs/v2preview/impl.go linguist-generated=true +jobs/v2preview/model.go linguist-generated=true 
marketplace/v2/api.go linguist-generated=true marketplace/v2/client.go linguist-generated=true marketplace/v2/impl.go linguist-generated=true marketplace/v2/model.go linguist-generated=true +marketplace/v2preview/api.go linguist-generated=true +marketplace/v2preview/client.go linguist-generated=true +marketplace/v2preview/impl.go linguist-generated=true +marketplace/v2preview/model.go linguist-generated=true ml/v2/api.go linguist-generated=true ml/v2/client.go linguist-generated=true ml/v2/impl.go linguist-generated=true ml/v2/model.go linguist-generated=true +ml/v2preview/api.go linguist-generated=true +ml/v2preview/client.go linguist-generated=true +ml/v2preview/impl.go linguist-generated=true +ml/v2preview/model.go linguist-generated=true oauth2/v2/api.go linguist-generated=true oauth2/v2/client.go linguist-generated=true oauth2/v2/impl.go linguist-generated=true oauth2/v2/model.go linguist-generated=true +oauth2/v2preview/api.go linguist-generated=true +oauth2/v2preview/client.go linguist-generated=true +oauth2/v2preview/impl.go linguist-generated=true +oauth2/v2preview/model.go linguist-generated=true pipelines/v2/api.go linguist-generated=true pipelines/v2/client.go linguist-generated=true pipelines/v2/impl.go linguist-generated=true pipelines/v2/model.go linguist-generated=true +pipelines/v2preview/api.go linguist-generated=true +pipelines/v2preview/client.go linguist-generated=true +pipelines/v2preview/impl.go linguist-generated=true +pipelines/v2preview/model.go linguist-generated=true provisioning/v2/api.go linguist-generated=true provisioning/v2/client.go linguist-generated=true provisioning/v2/impl.go linguist-generated=true provisioning/v2/model.go linguist-generated=true +provisioning/v2preview/api.go linguist-generated=true +provisioning/v2preview/client.go linguist-generated=true +provisioning/v2preview/impl.go linguist-generated=true +provisioning/v2preview/model.go linguist-generated=true serving/v2/api.go linguist-generated=true serving/v2/client.go linguist-generated=true serving/v2/impl.go linguist-generated=true serving/v2/model.go linguist-generated=true +serving/v2preview/api.go linguist-generated=true +serving/v2preview/client.go linguist-generated=true +serving/v2preview/impl.go linguist-generated=true +serving/v2preview/model.go linguist-generated=true settings/v2/api.go linguist-generated=true settings/v2/client.go linguist-generated=true settings/v2/impl.go linguist-generated=true settings/v2/model.go linguist-generated=true +settings/v2preview/api.go linguist-generated=true +settings/v2preview/client.go linguist-generated=true +settings/v2preview/impl.go linguist-generated=true +settings/v2preview/model.go linguist-generated=true sharing/v2/api.go linguist-generated=true sharing/v2/client.go linguist-generated=true sharing/v2/impl.go linguist-generated=true sharing/v2/model.go linguist-generated=true +sharing/v2preview/api.go linguist-generated=true +sharing/v2preview/client.go linguist-generated=true +sharing/v2preview/impl.go linguist-generated=true +sharing/v2preview/model.go linguist-generated=true sql/v2/api.go linguist-generated=true sql/v2/client.go linguist-generated=true sql/v2/impl.go linguist-generated=true sql/v2/model.go linguist-generated=true +sql/v2preview/api.go linguist-generated=true +sql/v2preview/client.go linguist-generated=true +sql/v2preview/impl.go linguist-generated=true +sql/v2preview/model.go linguist-generated=true vectorsearch/v2/api.go linguist-generated=true vectorsearch/v2/client.go linguist-generated=true 
vectorsearch/v2/impl.go linguist-generated=true vectorsearch/v2/model.go linguist-generated=true +vectorsearch/v2preview/api.go linguist-generated=true +vectorsearch/v2preview/client.go linguist-generated=true +vectorsearch/v2preview/impl.go linguist-generated=true +vectorsearch/v2preview/model.go linguist-generated=true workspace/v2/api.go linguist-generated=true workspace/v2/client.go linguist-generated=true workspace/v2/impl.go linguist-generated=true workspace/v2/model.go linguist-generated=true +workspace/v2preview/api.go linguist-generated=true +workspace/v2preview/client.go linguist-generated=true +workspace/v2preview/impl.go linguist-generated=true +workspace/v2preview/model.go linguist-generated=true diff --git a/apps/v2preview/api.go b/apps/v2preview/api.go new file mode 100755 index 000000000..feb34d7e3 --- /dev/null +++ b/apps/v2preview/api.go @@ -0,0 +1,211 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. +package appspreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type AppsPreviewInterface interface { + + // Create an app. + // + // Creates a new app. + Create(ctx context.Context, request CreateAppRequest) (*App, error) + + // Delete an app. + // + // Deletes an app. + Delete(ctx context.Context, request DeleteAppRequest) (*App, error) + + // Delete an app. + // + // Deletes an app. + DeleteByName(ctx context.Context, name string) (*App, error) + + // Create an app deployment. + // + // Creates an app deployment for the app with the supplied name. + Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) + + // Get an app. + // + // Retrieves information for the app with the supplied name. + Get(ctx context.Context, request GetAppRequest) (*App, error) + + // Get an app. + // + // Retrieves information for the app with the supplied name. + GetByName(ctx context.Context, name string) (*App, error) + + // Get an app deployment. + // + // Retrieves information for the app deployment with the supplied name and + // deployment id. + GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) + + // Get an app deployment. + // + // Retrieves information for the app deployment with the supplied name and + // deployment id. + GetDeploymentByAppNameAndDeploymentId(ctx context.Context, appName string, deploymentId string) (*AppDeployment, error) + + // Get app permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error) + + // Get app permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByAppName(ctx context.Context, appName string) (*GetAppPermissionLevelsResponse, error) + + // Get app permissions. + // + // Gets the permissions of an app. Apps can inherit permissions from their root + // object. + GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error) + + // Get app permissions. + // + // Gets the permissions of an app. Apps can inherit permissions from their root + // object. 
+ GetPermissionsByAppName(ctx context.Context, appName string) (*AppPermissions, error) + + // List apps. + // + // Lists all apps in the workspace. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] + + // List apps. + // + // Lists all apps in the workspace. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) + + // List app deployments. + // + // Lists all app deployments for the app with the supplied name. + // + // This method is generated by Databricks SDK Code Generator. + ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] + + // List app deployments. + // + // Lists all app deployments for the app with the supplied name. + // + // This method is generated by Databricks SDK Code Generator. + ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) + + // List app deployments. + // + // Lists all app deployments for the app with the supplied name. + ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) + + // Set app permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) + + // Start an app. + // + // Start the last active deployment of the app in the workspace. + Start(ctx context.Context, request StartAppRequest) (*App, error) + + // Stop an app. + // + // Stops the active deployment of the app in the workspace. + Stop(ctx context.Context, request StopAppRequest) (*App, error) + + // Update an app. + // + // Updates the app with the supplied name. + Update(ctx context.Context, request UpdateAppRequest) (*App, error) + + // Update app permissions. + // + // Updates the permissions on an app. Apps can inherit permissions from their + // root object. + UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) +} + +func NewAppsPreview(client *client.DatabricksClient) *AppsPreviewAPI { + return &AppsPreviewAPI{ + appsPreviewImpl: appsPreviewImpl{ + client: client, + }, + } +} + +// Apps run directly on a customer’s Databricks instance, integrate with their +// data, use and extend Databricks services, and enable users to interact +// through single sign-on. +type AppsPreviewAPI struct { + appsPreviewImpl +} + +// Delete an app. +// +// Deletes an app. +func (a *AppsPreviewAPI) DeleteByName(ctx context.Context, name string) (*App, error) { + return a.appsPreviewImpl.Delete(ctx, DeleteAppRequest{ + Name: name, + }) +} + +// Get an app. +// +// Retrieves information for the app with the supplied name. +func (a *AppsPreviewAPI) GetByName(ctx context.Context, name string) (*App, error) { + return a.appsPreviewImpl.Get(ctx, GetAppRequest{ + Name: name, + }) +} + +// Get an app deployment. +// +// Retrieves information for the app deployment with the supplied name and +// deployment id. 
+func (a *AppsPreviewAPI) GetDeploymentByAppNameAndDeploymentId(ctx context.Context, appName string, deploymentId string) (*AppDeployment, error) { + return a.appsPreviewImpl.GetDeployment(ctx, GetAppDeploymentRequest{ + AppName: appName, + DeploymentId: deploymentId, + }) +} + +// Get app permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *AppsPreviewAPI) GetPermissionLevelsByAppName(ctx context.Context, appName string) (*GetAppPermissionLevelsResponse, error) { + return a.appsPreviewImpl.GetPermissionLevels(ctx, GetAppPermissionLevelsRequest{ + AppName: appName, + }) +} + +// Get app permissions. +// +// Gets the permissions of an app. Apps can inherit permissions from their root +// object. +func (a *AppsPreviewAPI) GetPermissionsByAppName(ctx context.Context, appName string) (*AppPermissions, error) { + return a.appsPreviewImpl.GetPermissions(ctx, GetAppPermissionsRequest{ + AppName: appName, + }) +} + +// List app deployments. +// +// Lists all app deployments for the app with the supplied name. +func (a *AppsPreviewAPI) ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) { + return a.appsPreviewImpl.internalListDeployments(ctx, ListAppDeploymentsRequest{ + AppName: appName, + }) +} diff --git a/apps/v2preview/client.go b/apps/v2preview/client.go new file mode 100755 index 000000000..acb6d473e --- /dev/null +++ b/apps/v2preview/client.go @@ -0,0 +1,45 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package appspreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type AppsPreviewClient struct { + AppsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAppsPreviewClient(cfg *config.Config) (*AppsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AppsPreviewClient{ + Config: cfg, + apiClient: apiClient, + AppsPreviewInterface: NewAppsPreview(databricksClient), + }, nil +} diff --git a/apps/v2preview/impl.go b/apps/v2preview/impl.go new file mode 100755 index 000000000..ad1415fda --- /dev/null +++ b/apps/v2preview/impl.go @@ -0,0 +1,237 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
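Editor's note: the NewAppsPreviewClient constructor above resolves configuration before returning, so callers can rely on environment-based authentication. A minimal usage sketch, assuming the apps/v2preview directory in this patch is importable as "github.com/databricks/databricks-sdk-go/apps/v2preview" and that DATABRICKS_HOST and credentials are set in the environment; "my-app" is a placeholder, not a name from the patch:

package main

import (
	"context"
	"log"

	appspreview "github.com/databricks/databricks-sdk-go/apps/v2preview"
)

func main() {
	// A nil config makes NewAppsPreviewClient substitute an empty config.Config,
	// which EnsureResolved then fills from the environment.
	apps, err := appspreview.NewAppsPreviewClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// GetByName wraps Get with a GetAppRequest, as defined in api.go above.
	app, err := apps.GetByName(context.Background(), "my-app")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("app %s serves at %s", app.Name, app.Url)
}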
+ +package appspreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" + "golang.org/x/exp/slices" +) + +// unexported type that holds implementations of just AppsPreview API methods +type appsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *appsPreviewImpl) Create(ctx context.Context, request CreateAppRequest) (*App, error) { + var app App + path := "/api/2.0preview/apps" + queryParams := make(map[string]any) + if request.NoCompute != false || slices.Contains(request.ForceSendFields, "NoCompute") { + queryParams["no_compute"] = request.NoCompute + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.App, &app) + return &app, err +} + +func (a *appsPreviewImpl) Delete(ctx context.Context, request DeleteAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &app) + return &app, err +} + +func (a *appsPreviewImpl) Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) { + var appDeployment AppDeployment + path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.AppDeployment, &appDeployment) + return &appDeployment, err +} + +func (a *appsPreviewImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &app) + return &app, err +} + +func (a *appsPreviewImpl) GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) { + var appDeployment AppDeployment + path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments/%v", request.AppName, request.DeploymentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &appDeployment) + return &appDeployment, err +} + +func (a *appsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error) { + var getAppPermissionLevelsResponse GetAppPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v/permissionLevels", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAppPermissionLevelsResponse) + return &getAppPermissionLevelsResponse, err +} + +func (a *appsPreviewImpl) GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error) 
{ + var appPermissions AppPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &appPermissions) + return &appPermissions, err +} + +// List apps. +// +// Lists all apps in the workspace. +func (a *appsPreviewImpl) List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] { + + getNextPage := func(ctx context.Context, req ListAppsRequest) (*ListAppsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAppsResponse) []App { + return resp.Apps + } + getNextReq := func(resp *ListAppsResponse) *ListAppsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List apps. +// +// Lists all apps in the workspace. +func (a *appsPreviewImpl) ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[App](ctx, iterator) +} +func (a *appsPreviewImpl) internalList(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { + var listAppsResponse ListAppsResponse + path := "/api/2.0preview/apps" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAppsResponse) + return &listAppsResponse, err +} + +// List app deployments. +// +// Lists all app deployments for the app with the supplied name. +func (a *appsPreviewImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] { + + getNextPage := func(ctx context.Context, req ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListDeployments(ctx, req) + } + getItems := func(resp *ListAppDeploymentsResponse) []AppDeployment { + return resp.AppDeployments + } + getNextReq := func(resp *ListAppDeploymentsResponse) *ListAppDeploymentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List app deployments. +// +// Lists all app deployments for the app with the supplied name. 
+func (a *appsPreviewImpl) ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) { + iterator := a.ListDeployments(ctx, request) + return listing.ToSlice[AppDeployment](ctx, iterator) +} +func (a *appsPreviewImpl) internalListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { + var listAppDeploymentsResponse ListAppDeploymentsResponse + path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAppDeploymentsResponse) + return &listAppDeploymentsResponse, err +} + +func (a *appsPreviewImpl) SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { + var appPermissions AppPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &appPermissions) + return &appPermissions, err +} + +func (a *appsPreviewImpl) Start(ctx context.Context, request StartAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0preview/apps/%v/start", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &app) + return &app, err +} + +func (a *appsPreviewImpl) Stop(ctx context.Context, request StopAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0preview/apps/%v/stop", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &app) + return &app, err +} + +func (a *appsPreviewImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, error) { + var app App + path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.App, &app) + return &app, err +} + +func (a *appsPreviewImpl) UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { + var appPermissions AppPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &appPermissions) + return &appPermissions, err +} diff --git a/apps/v2preview/model.go b/apps/v2preview/model.go new file mode 100755 index 000000000..697a5c8bc --- /dev/null +++ b/apps/v2preview/model.go @@ -0,0 +1,750 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
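Editor's note: the List and ListDeployments implementations above wire next_page_token into a lazy listing.Iterator, while the *All variants drain that iterator into a slice via listing.ToSlice. A consumption sketch, reusing the hypothetical client from the previous note (HasNext/Next are the iterator methods this SDK's listing package exposes):

ctx := context.Background()
it := apps.List(ctx, appspreview.ListAppsRequest{PageSize: 20})
for it.HasNext(ctx) {
	app, err := it.Next(ctx) // fetches the next page transparently when the current one is exhausted
	if err != nil {
		log.Fatal(err)
	}
	log.Println(app.Name)
}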
+ +package appspreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type App struct { + // The active deployment of the app. A deployment is considered active when + // it has been deployed to the app compute. + ActiveDeployment *AppDeployment `json:"active_deployment,omitempty"` + + AppStatus *ApplicationStatus `json:"app_status,omitempty"` + + ComputeStatus *ComputeStatus `json:"compute_status,omitempty"` + // The creation time of the app. Formatted timestamp in ISO 8601. + CreateTime string `json:"create_time,omitempty"` + // The email of the user that created the app. + Creator string `json:"creator,omitempty"` + // The default workspace file system path of the source code from which app + // deployments are created. This field tracks the workspace source code path + // of the last active deployment. + DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` + // The description of the app. + Description string `json:"description,omitempty"` + // The unique identifier of the app. + Id string `json:"id,omitempty"` + // The name of the app. The name must contain only lowercase alphanumeric + // characters and hyphens. It must be unique within the workspace. + Name string `json:"name"` + // The pending deployment of the app. A deployment is considered pending + // when it is being prepared for deployment to the app compute. + PendingDeployment *AppDeployment `json:"pending_deployment,omitempty"` + // Resources for the app. + Resources []AppResource `json:"resources,omitempty"` + + ServicePrincipalClientId string `json:"service_principal_client_id,omitempty"` + + ServicePrincipalId int64 `json:"service_principal_id,omitempty"` + + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // The update time of the app. Formatted timestamp in ISO 8601. + UpdateTime string `json:"update_time,omitempty"` + // The email of the user that last updated the app. + Updater string `json:"updater,omitempty"` + // The URL of the app once it is deployed. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *App) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s App) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel AppPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppAccessControlResponse struct { + // All permissions. + AllPermissions []AppPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal.
+ ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppDeployment struct { + // The creation time of the deployment. Formatted timestamp in ISO 8601. + CreateTime string `json:"create_time,omitempty"` + // The email of the user that created the deployment. + Creator string `json:"creator,omitempty"` + // The deployment artifacts for an app. + DeploymentArtifacts *AppDeploymentArtifacts `json:"deployment_artifacts,omitempty"` + // The unique id of the deployment. + DeploymentId string `json:"deployment_id,omitempty"` + // The mode in which the deployment will manage the source code. + Mode AppDeploymentMode `json:"mode,omitempty"` + // The workspace file system path of the source code used to create the app + // deployment. This is different from + // `deployment_artifacts.source_code_path`, which is the path used by the + // deployed app. The former refers to the original source code location of + // the app in the workspace during deployment creation, whereas the latter + // provides a system-generated, stable, snapshotted source code path used by + // the deployment. + SourceCodePath string `json:"source_code_path,omitempty"` + // Status and status message of the deployment. + Status *AppDeploymentStatus `json:"status,omitempty"` + // The update time of the deployment. Formatted timestamp in ISO 8601. + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppDeployment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppDeployment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppDeploymentArtifacts struct { + // The snapshotted workspace file system path of the source code loaded by + // the deployed app.
+ SourceCodePath string `json:"source_code_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppDeploymentArtifacts) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppDeploymentArtifacts) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppDeploymentMode string + +const AppDeploymentModeAutoSync AppDeploymentMode = `AUTO_SYNC` + +const AppDeploymentModeSnapshot AppDeploymentMode = `SNAPSHOT` + +// String representation for [fmt.Print] +func (f *AppDeploymentMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppDeploymentMode) Set(v string) error { + switch v { + case `AUTO_SYNC`, `SNAPSHOT`: + *f = AppDeploymentMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTO_SYNC", "SNAPSHOT"`, v) + } +} + +// Type always returns AppDeploymentMode to satisfy [pflag.Value] interface +func (f *AppDeploymentMode) Type() string { + return "AppDeploymentMode" +} + +type AppDeploymentState string + +const AppDeploymentStateCancelled AppDeploymentState = `CANCELLED` + +const AppDeploymentStateFailed AppDeploymentState = `FAILED` + +const AppDeploymentStateInProgress AppDeploymentState = `IN_PROGRESS` + +const AppDeploymentStateSucceeded AppDeploymentState = `SUCCEEDED` + +// String representation for [fmt.Print] +func (f *AppDeploymentState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppDeploymentState) Set(v string) error { + switch v { + case `CANCELLED`, `FAILED`, `IN_PROGRESS`, `SUCCEEDED`: + *f = AppDeploymentState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELLED", "FAILED", "IN_PROGRESS", "SUCCEEDED"`, v) + } +} + +// Type always returns AppDeploymentState to satisfy [pflag.Value] interface +func (f *AppDeploymentState) Type() string { + return "AppDeploymentState" +} + +type AppDeploymentStatus struct { + // Message corresponding with the deployment state. + Message string `json:"message,omitempty"` + // State of the deployment. 
+ State AppDeploymentState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppDeploymentStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppDeploymentStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel AppPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type AppPermissionLevel string + +const AppPermissionLevelCanManage AppPermissionLevel = `CAN_MANAGE` + +const AppPermissionLevelCanUse AppPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *AppPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_USE`: + *f = AppPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_USE"`, v) + } +} + +// Type always returns AppPermissionLevel to satisfy [pflag.Value] interface +func (f *AppPermissionLevel) Type() string { + return "AppPermissionLevel" +} + +type AppPermissions struct { + AccessControlList []AppAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel AppPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppPermissionsRequest struct { + AccessControlList []AppAccessControlRequest `json:"access_control_list,omitempty"` + // The app for which to get or manage permissions. + AppName string `json:"-" url:"-"` +} + +type AppResource struct { + // Description of the App Resource. + Description string `json:"description,omitempty"` + + Job *AppResourceJob `json:"job,omitempty"` + // Name of the App Resource. + Name string `json:"name"` + + Secret *AppResourceSecret `json:"secret,omitempty"` + + ServingEndpoint *AppResourceServingEndpoint `json:"serving_endpoint,omitempty"` + + SqlWarehouse *AppResourceSqlWarehouse `json:"sql_warehouse,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AppResource) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AppResource) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AppResourceJob struct { + // Id of the job to grant permission on. + Id string `json:"id"` + // Permissions to grant on the Job. Supported permissions are: "CAN_MANAGE", + // "IS_OWNER", "CAN_MANAGE_RUN", "CAN_VIEW". 
+ Permission AppResourceJobJobPermission `json:"permission"` +} + +type AppResourceJobJobPermission string + +const AppResourceJobJobPermissionCanManage AppResourceJobJobPermission = `CAN_MANAGE` + +const AppResourceJobJobPermissionCanManageRun AppResourceJobJobPermission = `CAN_MANAGE_RUN` + +const AppResourceJobJobPermissionCanView AppResourceJobJobPermission = `CAN_VIEW` + +const AppResourceJobJobPermissionIsOwner AppResourceJobJobPermission = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *AppResourceJobJobPermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppResourceJobJobPermission) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_MANAGE_RUN`, `CAN_VIEW`, `IS_OWNER`: + *f = AppResourceJobJobPermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"`, v) + } +} + +// Type always returns AppResourceJobJobPermission to satisfy [pflag.Value] interface +func (f *AppResourceJobJobPermission) Type() string { + return "AppResourceJobJobPermission" +} + +type AppResourceSecret struct { + // Key of the secret to grant permission on. + Key string `json:"key"` + // Permission to grant on the secret scope. For secrets, only one permission + // is allowed. Permission must be one of: "READ", "WRITE", "MANAGE". + Permission AppResourceSecretSecretPermission `json:"permission"` + // Scope of the secret to grant permission on. + Scope string `json:"scope"` +} + +// Permission to grant on the secret scope. Supported permissions are: "READ", +// "WRITE", "MANAGE". +type AppResourceSecretSecretPermission string + +const AppResourceSecretSecretPermissionManage AppResourceSecretSecretPermission = `MANAGE` + +const AppResourceSecretSecretPermissionRead AppResourceSecretSecretPermission = `READ` + +const AppResourceSecretSecretPermissionWrite AppResourceSecretSecretPermission = `WRITE` + +// String representation for [fmt.Print] +func (f *AppResourceSecretSecretPermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppResourceSecretSecretPermission) Set(v string) error { + switch v { + case `MANAGE`, `READ`, `WRITE`: + *f = AppResourceSecretSecretPermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANAGE", "READ", "WRITE"`, v) + } +} + +// Type always returns AppResourceSecretSecretPermission to satisfy [pflag.Value] interface +func (f *AppResourceSecretSecretPermission) Type() string { + return "AppResourceSecretSecretPermission" +} + +type AppResourceServingEndpoint struct { + // Name of the serving endpoint to grant permission on. + Name string `json:"name"` + // Permission to grant on the serving endpoint. Supported permissions are: + // "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW". 
+ Permission AppResourceServingEndpointServingEndpointPermission `json:"permission"` +} + +type AppResourceServingEndpointServingEndpointPermission string + +const AppResourceServingEndpointServingEndpointPermissionCanManage AppResourceServingEndpointServingEndpointPermission = `CAN_MANAGE` + +const AppResourceServingEndpointServingEndpointPermissionCanQuery AppResourceServingEndpointServingEndpointPermission = `CAN_QUERY` + +const AppResourceServingEndpointServingEndpointPermissionCanView AppResourceServingEndpointServingEndpointPermission = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *AppResourceServingEndpointServingEndpointPermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppResourceServingEndpointServingEndpointPermission) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`: + *f = AppResourceServingEndpointServingEndpointPermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"`, v) + } +} + +// Type always returns AppResourceServingEndpointServingEndpointPermission to satisfy [pflag.Value] interface +func (f *AppResourceServingEndpointServingEndpointPermission) Type() string { + return "AppResourceServingEndpointServingEndpointPermission" +} + +type AppResourceSqlWarehouse struct { + // Id of the SQL warehouse to grant permission on. + Id string `json:"id"` + // Permission to grant on the SQL warehouse. Supported permissions are: + // "CAN_MANAGE", "CAN_USE", "IS_OWNER". + Permission AppResourceSqlWarehouseSqlWarehousePermission `json:"permission"` +} + +type AppResourceSqlWarehouseSqlWarehousePermission string + +const AppResourceSqlWarehouseSqlWarehousePermissionCanManage AppResourceSqlWarehouseSqlWarehousePermission = `CAN_MANAGE` + +const AppResourceSqlWarehouseSqlWarehousePermissionCanUse AppResourceSqlWarehouseSqlWarehousePermission = `CAN_USE` + +const AppResourceSqlWarehouseSqlWarehousePermissionIsOwner AppResourceSqlWarehouseSqlWarehousePermission = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *AppResourceSqlWarehouseSqlWarehousePermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppResourceSqlWarehouseSqlWarehousePermission) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`: + *f = AppResourceSqlWarehouseSqlWarehousePermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_USE", "IS_OWNER"`, v) + } +} + +// Type always returns AppResourceSqlWarehouseSqlWarehousePermission to satisfy [pflag.Value] interface +func (f *AppResourceSqlWarehouseSqlWarehousePermission) Type() string { + return "AppResourceSqlWarehouseSqlWarehousePermission" +} + +type ApplicationState string + +const ApplicationStateCrashed ApplicationState = `CRASHED` + +const ApplicationStateDeploying ApplicationState = `DEPLOYING` + +const ApplicationStateRunning ApplicationState = `RUNNING` + +const ApplicationStateUnavailable ApplicationState = `UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *ApplicationState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ApplicationState) Set(v string) error { + switch v { + case `CRASHED`, `DEPLOYING`, `RUNNING`, `UNAVAILABLE`: + *f = ApplicationState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of 
"CRASHED", "DEPLOYING", "RUNNING", "UNAVAILABLE"`, v) + } +} + +// Type always returns ApplicationState to satisfy [pflag.Value] interface +func (f *ApplicationState) Type() string { + return "ApplicationState" +} + +type ApplicationStatus struct { + // Application status message + Message string `json:"message,omitempty"` + // State of the application. + State ApplicationState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ApplicationStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ApplicationStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ComputeState string + +const ComputeStateActive ComputeState = `ACTIVE` + +const ComputeStateDeleting ComputeState = `DELETING` + +const ComputeStateError ComputeState = `ERROR` + +const ComputeStateStarting ComputeState = `STARTING` + +const ComputeStateStopped ComputeState = `STOPPED` + +const ComputeStateStopping ComputeState = `STOPPING` + +const ComputeStateUpdating ComputeState = `UPDATING` + +// String representation for [fmt.Print] +func (f *ComputeState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ComputeState) Set(v string) error { + switch v { + case `ACTIVE`, `DELETING`, `ERROR`, `STARTING`, `STOPPED`, `STOPPING`, `UPDATING`: + *f = ComputeState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETING", "ERROR", "STARTING", "STOPPED", "STOPPING", "UPDATING"`, v) + } +} + +// Type always returns ComputeState to satisfy [pflag.Value] interface +func (f *ComputeState) Type() string { + return "ComputeState" +} + +type ComputeStatus struct { + // Compute status message + Message string `json:"message,omitempty"` + // State of the app compute. + State ComputeState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ComputeStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ComputeStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Create an app deployment +type CreateAppDeploymentRequest struct { + AppDeployment *AppDeployment `json:"app_deployment,omitempty"` + // The name of the app. + AppName string `json:"-" url:"-"` +} + +// Create an app +type CreateAppRequest struct { + App *App `json:"app,omitempty"` + // If true, the app will not be started after creation. + NoCompute bool `json:"-" url:"no_compute,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAppRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAppRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete an app +type DeleteAppRequest struct { + // The name of the app. + Name string `json:"-" url:"-"` +} + +// Get an app deployment +type GetAppDeploymentRequest struct { + // The name of the app. + AppName string `json:"-" url:"-"` + // The unique id of the deployment. + DeploymentId string `json:"-" url:"-"` +} + +// Get app permission levels +type GetAppPermissionLevelsRequest struct { + // The app for which to get or manage permissions. + AppName string `json:"-" url:"-"` +} + +type GetAppPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []AppPermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get app permissions +type GetAppPermissionsRequest struct { + // The app for which to get or manage permissions. 
+ AppName string `json:"-" url:"-"` +} + +// Get an app +type GetAppRequest struct { + // The name of the app. + Name string `json:"-" url:"-"` +} + +// List app deployments +type ListAppDeploymentsRequest struct { + // The name of the app. + AppName string `json:"-" url:"-"` + // Upper bound for items returned. + PageSize int `json:"-" url:"page_size,omitempty"` + // Pagination token to go to the next page of app deployments. Requests first + // page if absent. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAppDeploymentsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAppDeploymentsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAppDeploymentsResponse struct { + // Deployment history of the app. + AppDeployments []AppDeployment `json:"app_deployments,omitempty"` + // Pagination token to request the next page of app deployments. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAppDeploymentsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAppDeploymentsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List apps +type ListAppsRequest struct { + // Upper bound for items returned. + PageSize int `json:"-" url:"page_size,omitempty"` + // Pagination token to go to the next page of apps. Requests first page if + // absent. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAppsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAppsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAppsResponse struct { + Apps []App `json:"apps,omitempty"` + // Pagination token to request the next page of apps. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAppsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAppsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StartAppRequest struct { + // The name of the app. + Name string `json:"-" url:"-"` +} + +type StopAppRequest struct { + // The name of the app. + Name string `json:"-" url:"-"` +} + +// Update an app +type UpdateAppRequest struct { + App *App `json:"app,omitempty"` + // The name of the app. The name must contain only lowercase alphanumeric + // characters and hyphens. It must be unique within the workspace. + Name string `json:"-" url:"-"` +} diff --git a/billing/v2preview/api.go b/billing/v2preview/api.go new file mode 100755 index 000000000..0b2d503bf --- /dev/null +++ b/billing/v2preview/api.go @@ -0,0 +1,466 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Billable Usage Preview, Budget Policy Preview, Budgets Preview, Log Delivery Preview, Usage Dashboards Preview, etc. +package billingpreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type BillableUsagePreviewInterface interface { + + // Return billable usage logs. + // + // Returns billable usage logs in CSV format for the specified account and date + // range.
For the data schema, see [CSV file schema]. Note that this method + // might take multiple minutes to complete. + // + // **Warning**: Depending on the queried date range, the number of workspaces in + // the account, the size of the response and the internet speed of the caller, + // this API may hit a timeout after a few minutes. If you experience this, try + // to mitigate by calling the API with narrower date ranges. + // + // [CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema + Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) +} + +func NewBillableUsagePreview(client *client.DatabricksClient) *BillableUsagePreviewAPI { + return &BillableUsagePreviewAPI{ + billableUsagePreviewImpl: billableUsagePreviewImpl{ + client: client, + }, + } +} + +// This API allows you to download billable usage logs for the specified account +// and date range. This feature works with all account types. +type BillableUsagePreviewAPI struct { + billableUsagePreviewImpl +} + +type BudgetPolicyPreviewInterface interface { + + // Create a budget policy. + // + // Creates a new policy. + Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) + + // Delete a budget policy. + // + // Deletes a policy. + Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error + + // Delete a budget policy. + // + // Deletes a policy. + DeleteByPolicyId(ctx context.Context, policyId string) error + + // Get a budget policy. + // + // Retrieves a policy by its ID. + Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) + + // Get a budget policy. + // + // Retrieves a policy by its ID. + GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) + + // List policies. + // + // Lists all policies. Policies are returned in the alphabetically ascending + // order of their names. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] + + // List policies. + // + // Lists all policies. Policies are returned in the alphabetically ascending + // order of their names. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) + + // Update a budget policy. + // + // Updates a policy. + Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) +} + +func NewBudgetPolicyPreview(client *client.DatabricksClient) *BudgetPolicyPreviewAPI { + return &BudgetPolicyPreviewAPI{ + budgetPolicyPreviewImpl: budgetPolicyPreviewImpl{ + client: client, + }, + } +} + +// A service that serves the REST API for budget policies. +type BudgetPolicyPreviewAPI struct { + budgetPolicyPreviewImpl +} + +// Delete a budget policy. +// +// Deletes a policy. +func (a *BudgetPolicyPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.budgetPolicyPreviewImpl.Delete(ctx, DeleteBudgetPolicyRequest{ + PolicyId: policyId, + }) +} + +// Get a budget policy. +// +// Retrieves a policy by its ID. +func (a *BudgetPolicyPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) { + return a.budgetPolicyPreviewImpl.Get(ctx, GetBudgetPolicyRequest{ + PolicyId: policyId, + }) +} + +type BudgetsPreviewInterface interface { + + // Create new budget. + // + // Create a new budget configuration for an account.
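The Download method above exposes the CSV body as an io.ReadCloser on the response, so callers can stream it instead of buffering it. A minimal sketch that also honors the narrow-date-range advice from the warning; the import path and the empty config are assumptions, and the months are placeholders:

package main

import (
    "context"
    "io"
    "os"

    billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
    "github.com/databricks/databricks-sdk-go/databricks/config"
)

func main() {
    ctx := context.Background()

    // Placeholder config; a real account ID, host, and credentials are required.
    client, err := billingpreview.NewBillableUsagePreviewClient(&config.Config{})
    if err != nil {
        panic(err)
    }

    // Keep the window narrow to reduce the chance of hitting the timeout noted above.
    resp, err := client.Download(ctx, billingpreview.DownloadRequest{
        StartMonth: "2024-01",
        EndMonth:   "2024-02",
    })
    if err != nil {
        panic(err)
    }
    defer resp.Contents.Close()

    // Stream the CSV to stdout rather than holding it in memory.
    if _, err := io.Copy(os.Stdout, resp.Contents); err != nil {
        panic(err)
    }
}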
For full details, see + // https://docs.databricks.com/en/admin/account-settings/budgets.html. + Create(ctx context.Context, request CreateBudgetConfigurationRequest) (*CreateBudgetConfigurationResponse, error) + + // Delete budget. + // + // Deletes a budget configuration for an account. Both account and budget + // configuration are specified by ID. This cannot be undone. + Delete(ctx context.Context, request DeleteBudgetConfigurationRequest) error + + // Delete budget. + // + // Deletes a budget configuration for an account. Both account and budget + // configuration are specified by ID. This cannot be undone. + DeleteByBudgetId(ctx context.Context, budgetId string) error + + // Get budget. + // + // Gets a budget configuration for an account. Both account and budget + // configuration are specified by ID. + Get(ctx context.Context, request GetBudgetConfigurationRequest) (*GetBudgetConfigurationResponse, error) + + // Get budget. + // + // Gets a budget configuration for an account. Both account and budget + // configuration are specified by ID. + GetByBudgetId(ctx context.Context, budgetId string) (*GetBudgetConfigurationResponse, error) + + // Get all budgets. + // + // Gets all budgets associated with this account. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] + + // Get all budgets. + // + // Gets all budgets associated with this account. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) + + // Modify budget. + // + // Updates a budget configuration for an account. Both account and budget + // configuration are specified by ID. + Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) +} + +func NewBudgetsPreview(client *client.DatabricksClient) *BudgetsPreviewAPI { + return &BudgetsPreviewAPI{ + budgetsPreviewImpl: budgetsPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage budget configurations for this account. Budgets enable you +// to monitor usage across your account. You can set up budgets to either track +// account-wide spending, or apply filters to track the spending of specific +// teams, projects, or workspaces. +type BudgetsPreviewAPI struct { + budgetsPreviewImpl +} + +// Delete budget. +// +// Deletes a budget configuration for an account. Both account and budget +// configuration are specified by ID. This cannot be undone. +func (a *BudgetsPreviewAPI) DeleteByBudgetId(ctx context.Context, budgetId string) error { + return a.budgetsPreviewImpl.Delete(ctx, DeleteBudgetConfigurationRequest{ + BudgetId: budgetId, + }) +} + +// Get budget. +// +// Gets a budget configuration for an account. Both account and budget +// configuration are specified by ID. +func (a *BudgetsPreviewAPI) GetByBudgetId(ctx context.Context, budgetId string) (*GetBudgetConfigurationResponse, error) { + return a.budgetsPreviewImpl.Get(ctx, GetBudgetConfigurationRequest{ + BudgetId: budgetId, + }) +} + +type LogDeliveryPreviewInterface interface { + + // Create a new log delivery configuration. + // + // Creates a new Databricks log delivery configuration to enable delivery of the + // specified type of logs to your storage location. 
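The BudgetsPreview wrappers above follow the same by-ID convenience pattern as the budget policy service. A hedged sketch that lists budget configurations and re-reads one by ID; the import path and the empty config are assumptions:

package main

import (
    "context"
    "fmt"

    billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
    "github.com/databricks/databricks-sdk-go/databricks/config"
)

func main() {
    ctx := context.Background()

    // Placeholder config; a real account ID, host, and credentials are required.
    client, err := billingpreview.NewBudgetsPreviewClient(&config.Config{})
    if err != nil {
        panic(err)
    }

    // ListAll drains the paginated List iterator into a slice.
    budgets, err := client.ListAll(ctx, billingpreview.ListBudgetConfigurationsRequest{})
    if err != nil {
        panic(err)
    }
    for _, b := range budgets {
        fmt.Println(b.BudgetConfigurationId, b.DisplayName)
    }

    // Each budget can also be re-fetched through the by-ID convenience wrapper.
    if len(budgets) > 0 {
        if _, err := client.GetByBudgetId(ctx, budgets[0].BudgetConfigurationId); err != nil {
            panic(err)
        }
    }
}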
This requires that you + // already created a [credential object](:method:Credentials/Create) (which + // encapsulates a cross-account service IAM role) and a [storage configuration + // object](:method:Storage/Create) (which encapsulates an S3 bucket). + // + // For full details, including the required IAM role policies and bucket + // policies, see [Deliver and access billable usage logs] or [Configure audit + // logging]. + // + // **Note**: There is a limit on the number of log delivery configurations + // available per account (each limit applies separately to each log type + // including billable usage and audit logs). You can create a maximum of two + // enabled account-level delivery configurations (configurations without a + // workspace filter) per type. Additionally, you can create two enabled + // workspace-level delivery configurations per workspace for each log type, + // which means that the same workspace ID can occur in the workspace filter for + // no more than two delivery configurations per log type. + // + // You cannot delete a log delivery configuration, but you can disable it (see + // [Enable or disable log delivery + // configuration](:method:LogDelivery/PatchStatus)). + // + // [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + Create(ctx context.Context, request WrappedCreateLogDeliveryConfiguration) (*WrappedLogDeliveryConfiguration, error) + + // Get log delivery configuration. + // + // Gets a Databricks log delivery configuration object for an account, both + // specified by ID. + Get(ctx context.Context, request GetLogDeliveryRequest) (*WrappedLogDeliveryConfiguration, error) + + // Get log delivery configuration. + // + // Gets a Databricks log delivery configuration object for an account, both + // specified by ID. + GetByLogDeliveryConfigurationId(ctx context.Context, logDeliveryConfigurationId string) (*WrappedLogDeliveryConfiguration, error) + + // Get all log delivery configurations. + // + // Gets all Databricks log delivery configurations associated with an account + // specified by ID. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] + + // Get all log delivery configurations. + // + // Gets all Databricks log delivery configurations associated with an account + // specified by ID. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) + + // LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryPreviewAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. + // + // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. + // + // Note: All [LogDeliveryConfiguration] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + LogDeliveryConfigurationConfigNameToConfigIdMap(ctx context.Context, request ListLogDeliveryRequest) (map[string]string, error) + + // GetByConfigName calls [LogDeliveryPreviewAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. 
+ // + // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. + // + // Note: All [LogDeliveryConfiguration] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByConfigName(ctx context.Context, name string) (*LogDeliveryConfiguration, error) + + // Enable or disable log delivery configuration. + // + // Enables or disables a log delivery configuration. Deletion of delivery + // configurations is not supported, so disable log delivery configurations that + // are no longer needed. Note that you can't re-enable a delivery configuration + // if this would violate the delivery configuration limits described under + // [Create log delivery](:method:LogDelivery/Create). + PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error +} + +func NewLogDeliveryPreview(client *client.DatabricksClient) *LogDeliveryPreviewAPI { + return &LogDeliveryPreviewAPI{ + logDeliveryPreviewImpl: logDeliveryPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage log delivery configurations for this account. The two +// supported log types for this API are _billable usage logs_ and _audit logs_. +// This feature is in Public Preview. This feature works with all account ID +// types. +// +// Log delivery works with all account types. However, if your account is on the +// E2 version of the platform or on a select custom plan that allows multiple +// workspaces per account, you can optionally configure different storage +// destinations for each workspace. Log delivery status is also provided to know +// the latest status of log delivery attempts. The high-level flow of billable +// usage delivery: +// +// 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific +// bucket policy. Using Databricks APIs, call the Account API to create a +// [storage configuration object](:method:Storage/Create) that uses the bucket +// name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. +// For full details, including the required IAM role policies and trust +// relationship, see [Billable usage log delivery]. Using Databricks APIs, call +// the Account API to create a [credential configuration +// object](:method:Credentials/Create) that uses the IAM role's ARN. 3. **Create +// log delivery configuration**: Using Databricks APIs, call the Account API to +// [create a log delivery configuration](:method:LogDelivery/Create) that uses +// the credential and storage configuration objects from previous steps. You can +// specify if the logs should include all events of that log type in your +// account (_Account level_ delivery) or only events for a specific set of +// workspaces (_workspace level_ delivery). Account level log delivery applies +// to all current and future workspaces plus account level logs, while workspace +// level log delivery solely delivers logs related to the specified workspaces. +// You can create multiple types of delivery configurations per account. +// +// For billable usage delivery: * For more information about billable usage +// logs, see [Billable usage log delivery]. For the CSV schema, see the [Usage +// page]. * The delivery location is +// `//billable-usage/csv/`, where `` is the name of +// the optional delivery path prefix you set up during log delivery +// configuration. Files are named +// `workspaceId=-usageMonth=.csv`.
* All billable usage +// logs apply to specific workspaces (_workspace level_ logs). You can aggregate +// usage for your entire account by creating an _account level_ delivery +// configuration that delivers logs for all current and future workspaces in +// your account. * The files are delivered daily by overwriting the month's CSV +// file for each workspace. +// +// For audit log delivery: * For more information about audit log +// delivery, see [Audit log delivery], which includes information about the JSON +// schema used. * The delivery location is +// `//workspaceId=/date=/auditlogs_.json`. +// Files may get overwritten with the same content multiple times to achieve +// exactly-once delivery. * If the audit log delivery configuration included +// specific workspace IDs, only _workspace-level_ audit logs for those +// workspaces are delivered. If the log delivery configuration applies to the +// entire account (_account level_ delivery configuration), the audit log +// delivery includes workspace-level audit logs for all workspaces in the +// account as well as account-level audit logs. See [Audit log delivery] for +// details. * Auditable events are typically available in logs within 15 +// minutes. +// +// [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html +// [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html +// [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html +// [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html +type LogDeliveryPreviewAPI struct { + logDeliveryPreviewImpl +} + +// Get log delivery configuration. +// +// Gets a Databricks log delivery configuration object for an account, both +// specified by ID. +func (a *LogDeliveryPreviewAPI) GetByLogDeliveryConfigurationId(ctx context.Context, logDeliveryConfigurationId string) (*WrappedLogDeliveryConfiguration, error) { + return a.logDeliveryPreviewImpl.Get(ctx, GetLogDeliveryRequest{ + LogDeliveryConfigurationId: logDeliveryConfigurationId, + }) +} + +// LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryPreviewAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. +// +// Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. +// +// Note: All [LogDeliveryConfiguration] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *LogDeliveryPreviewAPI) LogDeliveryConfigurationConfigNameToConfigIdMap(ctx context.Context, request ListLogDeliveryRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.ConfigName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .ConfigName: %s", key) + } + mapping[key] = v.ConfigId + } + return mapping, nil +} + +// GetByConfigName calls [LogDeliveryPreviewAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. +// +// Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName.
+// +// Note: All [LogDeliveryConfiguration] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *LogDeliveryPreviewAPI) GetByConfigName(ctx context.Context, name string) (*LogDeliveryConfiguration, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListLogDeliveryRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]LogDeliveryConfiguration{} + for _, v := range result { + key := v.ConfigName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("LogDeliveryConfiguration named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of LogDeliveryConfiguration named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type UsageDashboardsPreviewInterface interface { + + // Create new usage dashboard. + // + // Create a usage dashboard specified by workspaceId, accountId, and dashboard + // type. + Create(ctx context.Context, request CreateBillingUsageDashboardRequest) (*CreateBillingUsageDashboardResponse, error) + + // Get usage dashboard. + // + // Get a usage dashboard specified by workspaceId, accountId, and dashboard + // type. + Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) +} + +func NewUsageDashboardsPreview(client *client.DatabricksClient) *UsageDashboardsPreviewAPI { + return &UsageDashboardsPreviewAPI{ + usageDashboardsPreviewImpl: usageDashboardsPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage usage dashboards for this account. Usage dashboards enable +// you to gain insights into your usage with pre-built dashboards: visualize +// breakdowns, analyze tag attributions, and identify cost drivers. +type UsageDashboardsPreviewAPI struct { + usageDashboardsPreviewImpl +} diff --git a/billing/v2preview/client.go b/billing/v2preview/client.go new file mode 100755 index 000000000..dcbf0833f --- /dev/null +++ b/billing/v2preview/client.go @@ -0,0 +1,160 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
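GetByConfigName above resolves a configuration by its display name and errors on zero or duplicate matches; note that, like the name-to-ID map, it loads the full configuration list into memory first. A hedged usage sketch; the import path, the empty config, and the configuration name are placeholders, not values from this patch:

package main

import (
    "context"
    "fmt"

    billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
    "github.com/databricks/databricks-sdk-go/databricks/config"
)

func main() {
    ctx := context.Background()

    // Placeholder config; a real account ID, host, and credentials are required.
    client, err := billingpreview.NewLogDeliveryPreviewClient(&config.Config{})
    if err != nil {
        panic(err)
    }

    // Errors if no configuration, or more than one, carries this name.
    ldc, err := client.GetByConfigName(ctx, "usage-logs-prod")
    if err != nil {
        panic(err)
    }
    fmt.Println(ldc.ConfigId)
}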
+ +package billingpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" +) + +type BillableUsagePreviewClient struct { + BillableUsagePreviewInterface + + Config *config.Config +} + +func NewBillableUsagePreviewClient(cfg *config.Config) (*BillableUsagePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &BillableUsagePreviewClient{ + Config: cfg, + BillableUsagePreviewInterface: NewBillableUsagePreview(apiClient), + }, nil +} + +type BudgetPolicyPreviewClient struct { + BudgetPolicyPreviewInterface + + Config *config.Config +} + +func NewBudgetPolicyPreviewClient(cfg *config.Config) (*BudgetPolicyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &BudgetPolicyPreviewClient{ + Config: cfg, + BudgetPolicyPreviewInterface: NewBudgetPolicyPreview(apiClient), + }, nil +} + +type BudgetsPreviewClient struct { + BudgetsPreviewInterface + + Config *config.Config +} + +func NewBudgetsPreviewClient(cfg *config.Config) (*BudgetsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &BudgetsPreviewClient{ + Config: cfg, + BudgetsPreviewInterface: NewBudgetsPreview(apiClient), + }, nil +} + +type LogDeliveryPreviewClient struct { + LogDeliveryPreviewInterface + + Config *config.Config +} + +func NewLogDeliveryPreviewClient(cfg *config.Config) (*LogDeliveryPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &LogDeliveryPreviewClient{ + Config: cfg, + LogDeliveryPreviewInterface: NewLogDeliveryPreview(apiClient), + }, nil +} + +type UsageDashboardsPreviewClient struct { + UsageDashboardsPreviewInterface + + Config *config.Config +} + +func NewUsageDashboardsPreviewClient(cfg *config.Config) (*UsageDashboardsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := 
client.New(cfg) + if err != nil { + return nil, err + } + + return &UsageDashboardsPreviewClient{ + Config: cfg, + UsageDashboardsPreviewInterface: NewUsageDashboardsPreview(apiClient), + }, nil +} diff --git a/billing/v2preview/impl.go b/billing/v2preview/impl.go new file mode 100755 index 000000000..e03271b44 --- /dev/null +++ b/billing/v2preview/impl.go @@ -0,0 +1,318 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package billingpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just BillableUsagePreview API methods +type billableUsagePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *billableUsagePreviewImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { + var downloadResponse DownloadResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/usage/download", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "text/plain" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &downloadResponse) + return &downloadResponse, err +} + +// unexported type that holds implementations of just BudgetPolicyPreview API methods +type budgetPolicyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *budgetPolicyPreviewImpl) Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &budgetPolicy) + return &budgetPolicy, err +} + +func (a *budgetPolicyPreviewImpl) Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *budgetPolicyPreviewImpl) Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &budgetPolicy) + return &budgetPolicy, err +} + +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. 
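The implementation that follows wires internalList into a lazy iterator: each page is fetched on demand, and the next request is derived from the response's next_page_token. A sketch of consuming it, assuming the HasNext/Next methods the SDK's listing package exposes elsewhere; the import path and empty config are placeholders:

package main

import (
    "context"
    "fmt"

    billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
    "github.com/databricks/databricks-sdk-go/databricks/config"
)

func main() {
    ctx := context.Background()

    // Placeholder config; a real account ID, host, and credentials are required.
    client, err := billingpreview.NewBudgetPolicyPreviewClient(&config.Config{})
    if err != nil {
        panic(err)
    }

    // Each HasNext/Next pair fetches at most one page via next_page_token.
    it := client.List(ctx, billingpreview.ListBudgetPoliciesRequest{})
    for it.HasNext(ctx) {
        policy, err := it.Next(ctx)
        if err != nil {
            panic(err)
        }
        fmt.Println(policy.PolicyId, policy.PolicyName)
    }
}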
+func (a *budgetPolicyPreviewImpl) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { + + getNextPage := func(ctx context.Context, req ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListBudgetPoliciesResponse) []BudgetPolicy { + return resp.Policies + } + getNextReq := func(resp *ListBudgetPoliciesResponse) *ListBudgetPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. +func (a *budgetPolicyPreviewImpl) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BudgetPolicy](ctx, iterator) +} +func (a *budgetPolicyPreviewImpl) internalList(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { + var listBudgetPoliciesResponse ListBudgetPoliciesResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listBudgetPoliciesResponse) + return &listBudgetPoliciesResponse, err +} + +func (a *budgetPolicyPreviewImpl) Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + if request.LimitConfig != nil { + queryParams["limit_config"] = request.LimitConfig + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &budgetPolicy) + return &budgetPolicy, err +} + +// unexported type that holds implementations of just BudgetsPreview API methods +type budgetsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *budgetsPreviewImpl) Create(ctx context.Context, request CreateBudgetConfigurationRequest) (*CreateBudgetConfigurationResponse, error) { + var createBudgetConfigurationResponse CreateBudgetConfigurationResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createBudgetConfigurationResponse) + return &createBudgetConfigurationResponse, err +} + +func (a *budgetsPreviewImpl) Delete(ctx context.Context, request DeleteBudgetConfigurationRequest) error { + var deleteBudgetConfigurationResponse DeleteBudgetConfigurationResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := 
a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteBudgetConfigurationResponse) + return err +} + +func (a *budgetsPreviewImpl) Get(ctx context.Context, request GetBudgetConfigurationRequest) (*GetBudgetConfigurationResponse, error) { + var getBudgetConfigurationResponse GetBudgetConfigurationResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getBudgetConfigurationResponse) + return &getBudgetConfigurationResponse, err +} + +// Get all budgets. +// +// Gets all budgets associated with this account. +func (a *budgetsPreviewImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] { + + getNextPage := func(ctx context.Context, req ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListBudgetConfigurationsResponse) []BudgetConfiguration { + return resp.Budgets + } + getNextReq := func(resp *ListBudgetConfigurationsResponse) *ListBudgetConfigurationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all budgets. +// +// Gets all budgets associated with this account. +func (a *budgetsPreviewImpl) ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BudgetConfiguration](ctx, iterator) +} +func (a *budgetsPreviewImpl) internalList(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { + var listBudgetConfigurationsResponse ListBudgetConfigurationsResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listBudgetConfigurationsResponse) + return &listBudgetConfigurationsResponse, err +} + +func (a *budgetsPreviewImpl) Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) { + var updateBudgetConfigurationResponse UpdateBudgetConfigurationResponse + path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateBudgetConfigurationResponse) + return &updateBudgetConfigurationResponse, err +} + +// unexported type that holds implementations of just LogDeliveryPreview API methods +type logDeliveryPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *logDeliveryPreviewImpl) Create(ctx context.Context, request WrappedCreateLogDeliveryConfiguration) (*WrappedLogDeliveryConfiguration, error) { + var wrappedLogDeliveryConfiguration WrappedLogDeliveryConfiguration + 
path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &wrappedLogDeliveryConfiguration) + return &wrappedLogDeliveryConfiguration, err +} + +func (a *logDeliveryPreviewImpl) Get(ctx context.Context, request GetLogDeliveryRequest) (*WrappedLogDeliveryConfiguration, error) { + var wrappedLogDeliveryConfiguration WrappedLogDeliveryConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &wrappedLogDeliveryConfiguration) + return &wrappedLogDeliveryConfiguration, err +} + +// Get all log delivery configurations. +// +// Gets all Databricks log delivery configurations associated with an account +// specified by ID. +func (a *logDeliveryPreviewImpl) List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] { + + getNextPage := func(ctx context.Context, req ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *WrappedLogDeliveryConfigurations) []LogDeliveryConfiguration { + return resp.LogDeliveryConfigurations + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all log delivery configurations. +// +// Gets all Databricks log delivery configurations associated with an account +// specified by ID. 
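Unlike the budget iterators, the List above passes nil for the next-request function, so the endpoint is treated as a single page and ListAll amounts to one HTTP call. A hedged sketch of draining it; import path and empty config are placeholders:

package main

import (
    "context"
    "fmt"

    billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
    "github.com/databricks/databricks-sdk-go/databricks/config"
)

func main() {
    ctx := context.Background()

    // Placeholder config; a real account ID, host, and credentials are required.
    client, err := billingpreview.NewLogDeliveryPreviewClient(&config.Config{})
    if err != nil {
        panic(err)
    }

    // With no next-request function wired in, this issues a single request.
    configs, err := client.ListAll(ctx, billingpreview.ListLogDeliveryRequest{})
    if err != nil {
        panic(err)
    }
    for _, c := range configs {
        fmt.Println(c.ConfigId, c.ConfigName)
    }
}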
+func (a *logDeliveryPreviewImpl) ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[LogDeliveryConfiguration](ctx, iterator) +} +func (a *logDeliveryPreviewImpl) internalList(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { + var wrappedLogDeliveryConfigurations WrappedLogDeliveryConfigurations + path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &wrappedLogDeliveryConfigurations) + return &wrappedLogDeliveryConfigurations, err +} + +func (a *logDeliveryPreviewImpl) PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error { + var patchStatusResponse PatchStatusResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchStatusResponse) + return err +} + +// unexported type that holds implementations of just UsageDashboardsPreview API methods +type usageDashboardsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *usageDashboardsPreviewImpl) Create(ctx context.Context, request CreateBillingUsageDashboardRequest) (*CreateBillingUsageDashboardResponse, error) { + var createBillingUsageDashboardResponse CreateBillingUsageDashboardResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/dashboard", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createBillingUsageDashboardResponse) + return &createBillingUsageDashboardResponse, err +} + +func (a *usageDashboardsPreviewImpl) Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) { + var getBillingUsageDashboardResponse GetBillingUsageDashboardResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/dashboard", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getBillingUsageDashboardResponse) + return &getBillingUsageDashboardResponse, err +} diff --git a/billing/v2preview/model.go b/billing/v2preview/model.go new file mode 100755 index 000000000..fa4bc493d --- /dev/null +++ b/billing/v2preview/model.go @@ -0,0 +1,1196 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package billingpreview + +import ( + "fmt" + "io" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type ActionConfiguration struct { + // Databricks action configuration ID. + ActionConfigurationId string `json:"action_configuration_id,omitempty"` + // The type of the action. + ActionType ActionConfigurationType `json:"action_type,omitempty"` + // Target for the action. 
For example, an email address. + Target string `json:"target,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ActionConfiguration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ActionConfiguration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ActionConfigurationType string + +const ActionConfigurationTypeEmailNotification ActionConfigurationType = `EMAIL_NOTIFICATION` + +// String representation for [fmt.Print] +func (f *ActionConfigurationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ActionConfigurationType) Set(v string) error { + switch v { + case `EMAIL_NOTIFICATION`: + *f = ActionConfigurationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMAIL_NOTIFICATION"`, v) + } +} + +// Type always returns ActionConfigurationType to satisfy [pflag.Value] interface +func (f *ActionConfigurationType) Type() string { + return "ActionConfigurationType" +} + +type AlertConfiguration struct { + // Configured actions for this alert. These define what happens when an + // alert enters a triggered state. + ActionConfigurations []ActionConfiguration `json:"action_configurations,omitempty"` + // Databricks alert configuration ID. + AlertConfigurationId string `json:"alert_configuration_id,omitempty"` + // The threshold for the budget alert to determine if it is in a triggered + // state. The number is evaluated based on `quantity_type`. + QuantityThreshold string `json:"quantity_threshold,omitempty"` + // The way to calculate cost for this budget alert. This is what + // `quantity_threshold` is measured in. + QuantityType AlertConfigurationQuantityType `json:"quantity_type,omitempty"` + // The time window of usage data for the budget. + TimePeriod AlertConfigurationTimePeriod `json:"time_period,omitempty"` + // The evaluation method to determine when this budget alert is in a + // triggered state. 
+ TriggerType AlertConfigurationTriggerType `json:"trigger_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AlertConfiguration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertConfiguration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertConfigurationQuantityType string + +const AlertConfigurationQuantityTypeListPriceDollarsUsd AlertConfigurationQuantityType = `LIST_PRICE_DOLLARS_USD` + +// String representation for [fmt.Print] +func (f *AlertConfigurationQuantityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertConfigurationQuantityType) Set(v string) error { + switch v { + case `LIST_PRICE_DOLLARS_USD`: + *f = AlertConfigurationQuantityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LIST_PRICE_DOLLARS_USD"`, v) + } +} + +// Type always returns AlertConfigurationQuantityType to satisfy [pflag.Value] interface +func (f *AlertConfigurationQuantityType) Type() string { + return "AlertConfigurationQuantityType" +} + +type AlertConfigurationTimePeriod string + +const AlertConfigurationTimePeriodMonth AlertConfigurationTimePeriod = `MONTH` + +// String representation for [fmt.Print] +func (f *AlertConfigurationTimePeriod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertConfigurationTimePeriod) Set(v string) error { + switch v { + case `MONTH`: + *f = AlertConfigurationTimePeriod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MONTH"`, v) + } +} + +// Type always returns AlertConfigurationTimePeriod to satisfy [pflag.Value] interface +func (f *AlertConfigurationTimePeriod) Type() string { + return "AlertConfigurationTimePeriod" +} + +type AlertConfigurationTriggerType string + +const AlertConfigurationTriggerTypeCumulativeSpendingExceeded AlertConfigurationTriggerType = `CUMULATIVE_SPENDING_EXCEEDED` + +// String representation for [fmt.Print] +func (f *AlertConfigurationTriggerType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertConfigurationTriggerType) Set(v string) error { + switch v { + case `CUMULATIVE_SPENDING_EXCEEDED`: + *f = AlertConfigurationTriggerType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CUMULATIVE_SPENDING_EXCEEDED"`, v) + } +} + +// Type always returns AlertConfigurationTriggerType to satisfy [pflag.Value] interface +func (f *AlertConfigurationTriggerType) Type() string { + return "AlertConfigurationTriggerType" +} + +type BudgetConfiguration struct { + // Databricks account ID. + AccountId string `json:"account_id,omitempty"` + // Alerts to configure when this budget is in a triggered state. Budgets + // must have exactly one alert configuration. + AlertConfigurations []AlertConfiguration `json:"alert_configurations,omitempty"` + // Databricks budget configuration ID. + BudgetConfigurationId string `json:"budget_configuration_id,omitempty"` + // Creation time of this budget configuration. + CreateTime int64 `json:"create_time,omitempty"` + // Human-readable name of budget configuration. Max Length: 128 + DisplayName string `json:"display_name,omitempty"` + // Configured filters for this budget. These are applied to your account's + // usage to limit the scope of what is considered for this budget. Leave + // empty to include all usage for this account. 
All provided filters must be + // matched for usage to be included. + Filter *BudgetConfigurationFilter `json:"filter,omitempty"` + // Update time of this budget configuration. + UpdateTime int64 `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BudgetConfiguration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BudgetConfiguration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type BudgetConfigurationFilter struct { + // A list of tag keys and values that will limit the budget to usage that + // includes those specific custom tags. Tags are case-sensitive and should + // be entered exactly as they appear in your usage data. + Tags []BudgetConfigurationFilterTagClause `json:"tags,omitempty"` + // If provided, usage must match with the provided Databricks workspace IDs. + WorkspaceId *BudgetConfigurationFilterWorkspaceIdClause `json:"workspace_id,omitempty"` +} + +type BudgetConfigurationFilterClause struct { + Operator BudgetConfigurationFilterOperator `json:"operator,omitempty"` + + Values []string `json:"values,omitempty"` +} + +type BudgetConfigurationFilterOperator string + +const BudgetConfigurationFilterOperatorIn BudgetConfigurationFilterOperator = `IN` + +// String representation for [fmt.Print] +func (f *BudgetConfigurationFilterOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *BudgetConfigurationFilterOperator) Set(v string) error { + switch v { + case `IN`: + *f = BudgetConfigurationFilterOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN"`, v) + } +} + +// Type always returns BudgetConfigurationFilterOperator to satisfy [pflag.Value] interface +func (f *BudgetConfigurationFilterOperator) Type() string { + return "BudgetConfigurationFilterOperator" +} + +type BudgetConfigurationFilterTagClause struct { + Key string `json:"key,omitempty"` + + Value *BudgetConfigurationFilterClause `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BudgetConfigurationFilterTagClause) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BudgetConfigurationFilterTagClause) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type BudgetConfigurationFilterWorkspaceIdClause struct { + Operator BudgetConfigurationFilterOperator `json:"operator,omitempty"` + + Values []int64 `json:"values,omitempty"` +} + +// Contains the BudgetPolicy details. +type BudgetPolicy struct { + // A list of tags defined by the customer. At most 20 entries are allowed + // per policy. + CustomTags []CustomPolicyTag `json:"custom_tags,omitempty"` + // The Id of the policy. This field is generated by Databricks and globally + // unique. + PolicyId string `json:"policy_id"` + // The name of the policy. - Must be unique among active policies. - Can + // contain only characters from the ISO 8859-1 (latin1) set. + PolicyName string `json:"policy_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BudgetPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BudgetPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBillingUsageDashboardRequest struct { + // Workspace level usage dashboard shows usage data for the specified + // workspace ID. Global level usage dashboard shows usage data for all + // workspaces in the account. 
+ DashboardType UsageDashboardType `json:"dashboard_type,omitempty"` + // The workspace ID of the workspace in which the usage dashboard is + // created. + WorkspaceId int64 `json:"workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBillingUsageDashboardRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBillingUsageDashboardRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBillingUsageDashboardResponse struct { + // The unique id of the usage dashboard. + DashboardId string `json:"dashboard_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBillingUsageDashboardResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBillingUsageDashboardResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBudgetConfigurationBudget struct { + // Databricks account ID. + AccountId string `json:"account_id,omitempty"` + // Alerts to configure when this budget is in a triggered state. Budgets + // must have exactly one alert configuration. + AlertConfigurations []CreateBudgetConfigurationBudgetAlertConfigurations `json:"alert_configurations,omitempty"` + // Human-readable name of budget configuration. Max Length: 128 + DisplayName string `json:"display_name,omitempty"` + // Configured filters for this budget. These are applied to your account's + // usage to limit the scope of what is considered for this budget. Leave + // empty to include all usage for this account. All provided filters must be + // matched for usage to be included. + Filter *BudgetConfigurationFilter `json:"filter,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBudgetConfigurationBudget) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBudgetConfigurationBudget) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBudgetConfigurationBudgetActionConfigurations struct { + // The type of the action. + ActionType ActionConfigurationType `json:"action_type,omitempty"` + // Target for the action. For example, an email address. + Target string `json:"target,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBudgetConfigurationBudgetActionConfigurations) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBudgetConfigurationBudgetActionConfigurations) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBudgetConfigurationBudgetAlertConfigurations struct { + // Configured actions for this alert. These define what happens when an + // alert enters a triggered state. + ActionConfigurations []CreateBudgetConfigurationBudgetActionConfigurations `json:"action_configurations,omitempty"` + // The threshold for the budget alert to determine if it is in a triggered + // state. The number is evaluated based on `quantity_type`. + QuantityThreshold string `json:"quantity_threshold,omitempty"` + // The way to calculate cost for this budget alert. This is what + // `quantity_threshold` is measured in. + QuantityType AlertConfigurationQuantityType `json:"quantity_type,omitempty"` + // The time window of usage data for the budget. + TimePeriod AlertConfigurationTimePeriod `json:"time_period,omitempty"` + // The evaluation method to determine when this budget alert is in a + // triggered state. 
+ TriggerType AlertConfigurationTriggerType `json:"trigger_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBudgetConfigurationBudgetAlertConfigurations) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBudgetConfigurationBudgetAlertConfigurations) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateBudgetConfigurationRequest struct { + // Properties of the new budget configuration. + Budget CreateBudgetConfigurationBudget `json:"budget"` +} + +type CreateBudgetConfigurationResponse struct { + // The created budget configuration. + Budget *BudgetConfiguration `json:"budget,omitempty"` +} + +// A request to create a BudgetPolicy. +type CreateBudgetPolicyRequest struct { + // A list of tags defined by the customer. At most 40 entries are allowed + // per policy. + CustomTags []CustomPolicyTag `json:"custom_tags,omitempty"` + // The name of the policy. - Must be unique among active policies. - Can + // contain only characters of 0-9, a-z, A-Z, -, =, ., :, /, @, _, +, + // whitespace. + PolicyName string `json:"policy_name,omitempty"` + // A unique identifier for this request. Restricted to 36 ASCII characters. + // A random UUID is recommended. This request is only idempotent if a + // `request_id` is provided. + RequestId string `json:"request_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBudgetPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBudgetPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateLogDeliveryConfigurationParams struct { + // The optional human-readable name of the log delivery configuration. + // Defaults to empty. + ConfigName string `json:"config_name,omitempty"` + // The ID for a method:credentials/create that represents the AWS IAM role + // with policy and trust relationship as described in the main billable + // usage documentation page. See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + CredentialsId string `json:"credentials_id"` + // The optional delivery path prefix within Amazon S3 storage. Defaults to + // empty, which means that logs are delivered to the root of the bucket. + // This must be a valid S3 object key. This must not start or end with a + // slash character. + DeliveryPathPrefix string `json:"delivery_path_prefix,omitempty"` + // This field applies only if `log_type` is `BILLABLE_USAGE`. This is the + // optional start month and year for delivery, specified in `YYYY-MM` + // format. Defaults to current year and month. `BILLABLE_USAGE` logs are not + // available for usage before March 2019 (`2019-03`). + DeliveryStartTime string `json:"delivery_start_time,omitempty"` + // Log delivery type. Supported values are: + // + // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the + // CSV schema, see the [View billable usage]. + // + // * `AUDIT_LOGS` — Configure [audit log delivery]. 
For the JSON schema, + // see [Configure audit logging] + // + // [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + // [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + LogType LogType `json:"log_type"` + // The file type of log delivery. + // + // * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the + // CSV (comma-separated values) format is supported. For the schema, see the + // [View billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be + // `JSON`. Only the JSON (JavaScript Object Notation) format is supported. + // For the schema, see the [Configuring audit logs]. + // + // [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + OutputFormat OutputFormat `json:"output_format"` + // Status of log delivery configuration. Set to `ENABLED` (enabled) or + // `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable + // the configuration](#operation/patch-log-delivery-config-status) later. + // Deletion of a configuration is not supported, so disable a log delivery + // configuration that is no longer needed. + Status LogDeliveryConfigStatus `json:"status,omitempty"` + // The ID for a method:storage/create that represents the S3 bucket with + // bucket policy as described in the main billable usage documentation page. + // See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + StorageConfigurationId string `json:"storage_configuration_id"` + // Optional filter that specifies workspace IDs to deliver logs for. By + // default the workspace filter is empty and log delivery applies at the + // account level, delivering workspace-level logs for all workspaces in your + // account, plus account level logs. You can optionally set this field to an + // array of workspace IDs (each one is an `int64`) to which log delivery + // should apply, in which case only workspace-level logs relating to the + // specified workspaces are delivered. If you plan to use different log + // delivery configurations for different workspaces, set this field + // explicitly. Be aware that delivery configurations mentioning specific + // workspaces won't apply to new workspaces created in the future, and + // delivery won't include account level logs. For some types of Databricks + // deployments there is only one workspace per account ID, so this field is + // unnecessary. + WorkspaceIdsFilter []int64 `json:"workspace_ids_filter,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateLogDeliveryConfigurationParams) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateLogDeliveryConfigurationParams) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CustomPolicyTag struct { + // The key of the tag. 
+
+type CustomPolicyTag struct {
+	// The key of the tag. - Must be unique among all custom tags of the same
+	// policy - Cannot be "budget-policy-name", "budget-policy-id" or
+	// "budget-policy-resolution-result" - these tags are preserved.
+	//
+	// - Follows the regex pattern defined in
+	// cluster-common/conf/src/ClusterTagConstraints.scala
+	// (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L17)
+	Key string `json:"key"`
+	// The value of the tag.
+	//
+	// - Follows the regex pattern defined in
+	// cluster-common/conf/src/ClusterTagConstraints.scala
+	// (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L24)
+	Value string `json:"value,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CustomPolicyTag) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CustomPolicyTag) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Delete budget
+type DeleteBudgetConfigurationRequest struct {
+	// The Databricks budget configuration ID.
+	BudgetId string `json:"-" url:"-"`
+}
+
+type DeleteBudgetConfigurationResponse struct {
+}
+
+// Delete a budget policy
+type DeleteBudgetPolicyRequest struct {
+	// The Id of the policy.
+	PolicyId string `json:"-" url:"-"`
+}
+
+type DeleteResponse struct {
+}
+
+// The status string for log delivery. Possible values are: * `CREATED`: There
+// were no log delivery attempts since the config was created. * `SUCCEEDED`:
+// The latest attempt of log delivery has succeeded completely. *
+// `USER_FAILURE`: The latest attempt of log delivery failed because of
+// misconfiguration of customer provided permissions on role or storage. *
+// `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of a
+// Databricks internal error. Contact support if it doesn't go away soon. *
+// `NOT_FOUND`: The log delivery status as the configuration has been disabled
+// since the release of this feature or there are no workspaces in the account.
+type DeliveryStatus string
+
+// There were no log delivery attempts since the config was created.
+const DeliveryStatusCreated DeliveryStatus = `CREATED`
+
+// The log delivery status as the configuration has been disabled since the
+// release of this feature or there are no workspaces in the account.
+const DeliveryStatusNotFound DeliveryStatus = `NOT_FOUND`
+
+// The latest attempt of log delivery has succeeded completely.
+const DeliveryStatusSucceeded DeliveryStatus = `SUCCEEDED`
+
+// The latest attempt of log delivery failed because of an internal
+// error. Contact support if it doesn't go away soon.
+const DeliveryStatusSystemFailure DeliveryStatus = `SYSTEM_FAILURE`
+
+// The latest attempt of log delivery failed because of misconfiguration of
+// customer provided permissions on role or storage.
+const DeliveryStatusUserFailure DeliveryStatus = `USER_FAILURE`
+
+// String representation for [fmt.Print]
+func (f *DeliveryStatus) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *DeliveryStatus) Set(v string) error {
+	switch v {
+	case `CREATED`, `NOT_FOUND`, `SUCCEEDED`, `SYSTEM_FAILURE`, `USER_FAILURE`:
+		*f = DeliveryStatus(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "CREATED", "NOT_FOUND", "SUCCEEDED", "SYSTEM_FAILURE", "USER_FAILURE"`, v)
+	}
+}
+
+// Type always returns DeliveryStatus to satisfy [pflag.Value] interface
+func (f *DeliveryStatus) Type() string {
+	return "DeliveryStatus"
+}
+
+// Return billable usage logs
+type DownloadRequest struct {
+	// Format: `YYYY-MM`. Last month to return billable usage logs for. This
+	// field is required.
+	EndMonth string `json:"-" url:"end_month"`
+	// Specify whether to include personally identifiable information in the
+	// billable usage logs, for example the email addresses of cluster creators.
+	// Handle this information with care. Defaults to false.
+	PersonalData bool `json:"-" url:"personal_data,omitempty"`
+	// Format: `YYYY-MM`. First month to return billable usage logs for. This
+	// field is required.
+	StartMonth string `json:"-" url:"start_month"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *DownloadRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s DownloadRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type DownloadResponse struct {
+	Contents io.ReadCloser `json:"-"`
+}
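+
+// Example (editor's sketch, not generated code): requesting a usage window and
+// draining the streamed result. The resp variable stands in for the
+// *DownloadResponse returned by the download call in this package's API layer.
+//
+//	req := DownloadRequest{
+//		StartMonth: "2024-01", // YYYY-MM, required
+//		EndMonth:   "2024-03", // YYYY-MM, required
+//	}
+//	// Contents is a raw stream, so close it when done:
+//	//	defer resp.Contents.Close()
+//	//	data, err := io.ReadAll(resp.Contents)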
+
+// Structured representation of a filter to be applied to a list of policies.
+// All specified filters will be applied in conjunction.
+type Filter struct {
+	// The policy creator user id to be filtered on. If unspecified, all
+	// policies will be returned.
+	CreatorUserId int64 `json:"creator_user_id,omitempty" url:"creator_user_id,omitempty"`
+	// The policy creator user name to be filtered on. If unspecified, all
+	// policies will be returned.
+	CreatorUserName string `json:"creator_user_name,omitempty" url:"creator_user_name,omitempty"`
+	// The partial name of policies to be filtered on. If unspecified, all
+	// policies will be returned.
+	PolicyName string `json:"policy_name,omitempty" url:"policy_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Filter) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Filter) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get usage dashboard
+type GetBillingUsageDashboardRequest struct {
+	// Workspace level usage dashboard shows usage data for the specified
+	// workspace ID. Global level usage dashboard shows usage data for all
+	// workspaces in the account.
+	DashboardType UsageDashboardType `json:"-" url:"dashboard_type,omitempty"`
+	// The workspace ID of the workspace in which the usage dashboard is
+	// created.
+	WorkspaceId int64 `json:"-" url:"workspace_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GetBillingUsageDashboardRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GetBillingUsageDashboardRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type GetBillingUsageDashboardResponse struct {
+	// The unique id of the usage dashboard.
+	DashboardId string `json:"dashboard_id,omitempty"`
+	// The URL of the usage dashboard.
+	DashboardUrl string `json:"dashboard_url,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GetBillingUsageDashboardResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GetBillingUsageDashboardResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get budget
+type GetBudgetConfigurationRequest struct {
+	// The budget configuration ID.
+	BudgetId string `json:"-" url:"-"`
+}
+
+type GetBudgetConfigurationResponse struct {
+	Budget *BudgetConfiguration `json:"budget,omitempty"`
+}
+
+// Get a budget policy
+type GetBudgetPolicyRequest struct {
+	// The Id of the policy.
+	PolicyId string `json:"-" url:"-"`
+}
+
+// Get log delivery configuration
+type GetLogDeliveryRequest struct {
+	// Databricks log delivery configuration ID
+	LogDeliveryConfigurationId string `json:"-" url:"-"`
+}
+
+// The limit configuration of the policy. Limit configuration provides
+// budget-policy-level cost control by enforcing the limit.
+type LimitConfig struct {
+}
+
+// Get all budgets
+type ListBudgetConfigurationsRequest struct {
+	// A page token received from a previous get-all-budget-configurations call.
+	// This token can be used to retrieve the subsequent page. If absent, the
+	// first page is requested.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListBudgetConfigurationsRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListBudgetConfigurationsRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListBudgetConfigurationsResponse struct {
+	Budgets []BudgetConfiguration `json:"budgets,omitempty"`
+	// Token which can be sent as `page_token` to retrieve the next page of
+	// results. If this field is omitted, there are no subsequent budgets.
+	NextPageToken string `json:"next_page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListBudgetConfigurationsResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListBudgetConfigurationsResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// List policies
+type ListBudgetPoliciesRequest struct {
+	// A filter to apply to the list of policies.
+	FilterBy *Filter `json:"-" url:"filter_by,omitempty"`
+	// The maximum number of budget policies to return. If unspecified, at most
+	// 100 budget policies will be returned. The maximum value is 1000; values
+	// above 1000 will be coerced to 1000.
+	PageSize int `json:"-" url:"page_size,omitempty"`
+	// A page token, received from a previous `ListServerlessPolicies` call.
+	// Provide this to retrieve the subsequent page. If unspecified, the first
+	// page will be returned.
+	//
+	// When paginating, all other parameters provided to
+	// `ListServerlessPoliciesRequest` must match the call that provided the
+	// page token.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+	// The sort specification.
+	SortSpec *SortSpec `json:"-" url:"sort_spec,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListBudgetPoliciesRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListBudgetPoliciesRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
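+
+// Example (editor's sketch, not generated code): manual page_token pagination
+// over budget policies, using the ListBudgetPoliciesResponse defined below.
+// The list function stands in for the corresponding call in this package's
+// API layer; the generated iterator helpers wrap the same loop.
+//
+//	var policies []BudgetPolicy
+//	req := ListBudgetPoliciesRequest{PageSize: 100}
+//	for {
+//		resp, err := list(ctx, req) // returns *ListBudgetPoliciesResponse
+//		if err != nil {
+//			return err
+//		}
+//		policies = append(policies, resp.Policies...)
+//		if resp.NextPageToken == "" {
+//			break // no further pages
+//		}
+//		req.PageToken = resp.NextPageToken
+//	}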
+
+// A list of policies.
+type ListBudgetPoliciesResponse struct {
+	// A token that can be sent as `page_token` to retrieve the next page. If
+	// this field is omitted, there are no subsequent pages.
+	NextPageToken string `json:"next_page_token,omitempty"`
+
+	Policies []BudgetPolicy `json:"policies,omitempty"`
+	// A token that can be sent as `page_token` to retrieve the previous page.
+	// If this field is omitted, there are no previous pages.
+	PreviousPageToken string `json:"previous_page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListBudgetPoliciesResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListBudgetPoliciesResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get all log delivery configurations
+type ListLogDeliveryRequest struct {
+	// Filter by credential configuration ID.
+	CredentialsId string `json:"-" url:"credentials_id,omitempty"`
+	// Filter by status `ENABLED` or `DISABLED`.
+	Status LogDeliveryConfigStatus `json:"-" url:"status,omitempty"`
+	// Filter by storage configuration ID.
+	StorageConfigurationId string `json:"-" url:"storage_configuration_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListLogDeliveryRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListLogDeliveryRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Status of log delivery configuration. Set to `ENABLED` (enabled) or
+// `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable the
+// configuration](#operation/patch-log-delivery-config-status) later. Deletion
+// of a configuration is not supported, so disable a log delivery configuration
+// that is no longer needed.
+type LogDeliveryConfigStatus string
+
+const LogDeliveryConfigStatusDisabled LogDeliveryConfigStatus = `DISABLED`
+
+const LogDeliveryConfigStatusEnabled LogDeliveryConfigStatus = `ENABLED`
+
+// String representation for [fmt.Print]
+func (f *LogDeliveryConfigStatus) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *LogDeliveryConfigStatus) Set(v string) error {
+	switch v {
+	case `DISABLED`, `ENABLED`:
+		*f = LogDeliveryConfigStatus(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DISABLED", "ENABLED"`, v)
+	}
+}
+
+// Type always returns LogDeliveryConfigStatus to satisfy [pflag.Value] interface
+func (f *LogDeliveryConfigStatus) Type() string {
+	return "LogDeliveryConfigStatus"
+}
+
+type LogDeliveryConfiguration struct {
+	// The Databricks account ID that hosts the log delivery configuration.
+	AccountId string `json:"account_id,omitempty"`
+	// Databricks log delivery configuration ID.
+	ConfigId string `json:"config_id,omitempty"`
+	// The optional human-readable name of the log delivery configuration.
+	// Defaults to empty.
+	ConfigName string `json:"config_name,omitempty"`
+	// Time in epoch milliseconds when the log delivery configuration was
+	// created.
+	CreationTime int64 `json:"creation_time,omitempty"`
+	// The ID for a method:credentials/create that represents the AWS IAM role
+	// with policy and trust relationship as described in the main billable
+	// usage documentation page. See [Configure billable usage delivery].
+	//
+	// [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html
+	CredentialsId string `json:"credentials_id,omitempty"`
+	// The optional delivery path prefix within Amazon S3 storage. Defaults to
+	// empty, which means that logs are delivered to the root of the bucket.
+	// This must be a valid S3 object key.
This must not start or end with a + // slash character. + DeliveryPathPrefix string `json:"delivery_path_prefix,omitempty"` + // This field applies only if `log_type` is `BILLABLE_USAGE`. This is the + // optional start month and year for delivery, specified in `YYYY-MM` + // format. Defaults to current year and month. `BILLABLE_USAGE` logs are not + // available for usage before March 2019 (`2019-03`). + DeliveryStartTime string `json:"delivery_start_time,omitempty"` + // Databricks log delivery status. + LogDeliveryStatus *LogDeliveryStatus `json:"log_delivery_status,omitempty"` + // Log delivery type. Supported values are: + // + // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the + // CSV schema, see the [View billable usage]. + // + // * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, + // see [Configure audit logging] + // + // [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + // [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + LogType LogType `json:"log_type,omitempty"` + // The file type of log delivery. + // + // * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the + // CSV (comma-separated values) format is supported. For the schema, see the + // [View billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be + // `JSON`. Only the JSON (JavaScript Object Notation) format is supported. + // For the schema, see the [Configuring audit logs]. + // + // [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + OutputFormat OutputFormat `json:"output_format,omitempty"` + // Status of log delivery configuration. Set to `ENABLED` (enabled) or + // `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable + // the configuration](#operation/patch-log-delivery-config-status) later. + // Deletion of a configuration is not supported, so disable a log delivery + // configuration that is no longer needed. + Status LogDeliveryConfigStatus `json:"status,omitempty"` + // The ID for a method:storage/create that represents the S3 bucket with + // bucket policy as described in the main billable usage documentation page. + // See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + StorageConfigurationId string `json:"storage_configuration_id,omitempty"` + // Time in epoch milliseconds when the log delivery configuration was + // updated. + UpdateTime int64 `json:"update_time,omitempty"` + // Optional filter that specifies workspace IDs to deliver logs for. By + // default the workspace filter is empty and log delivery applies at the + // account level, delivering workspace-level logs for all workspaces in your + // account, plus account level logs. You can optionally set this field to an + // array of workspace IDs (each one is an `int64`) to which log delivery + // should apply, in which case only workspace-level logs relating to the + // specified workspaces are delivered. 
If you plan to use different log
+	// delivery configurations for different workspaces, set this field
+	// explicitly. Be aware that delivery configurations mentioning specific
+	// workspaces won't apply to new workspaces created in the future, and
+	// delivery won't include account level logs. For some types of Databricks
+	// deployments there is only one workspace per account ID, so this field is
+	// unnecessary.
+	WorkspaceIdsFilter []int64 `json:"workspace_ids_filter,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *LogDeliveryConfiguration) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s LogDeliveryConfiguration) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Databricks log delivery status.
+type LogDeliveryStatus struct {
+	// The UTC time for the latest log delivery attempt.
+	LastAttemptTime string `json:"last_attempt_time,omitempty"`
+	// The UTC time for the latest successful log delivery.
+	LastSuccessfulAttemptTime string `json:"last_successful_attempt_time,omitempty"`
+	// Informative message about the latest log delivery attempt. If the log
+	// delivery fails with USER_FAILURE, error details will be provided for
+	// fixing misconfigurations in cloud permissions.
+	Message string `json:"message,omitempty"`
+	// The status string for log delivery. Possible values are: * `CREATED`:
+	// There were no log delivery attempts since the config was created. *
+	// `SUCCEEDED`: The latest attempt of log delivery has succeeded completely.
+	// * `USER_FAILURE`: The latest attempt of log delivery failed because of
+	// misconfiguration of customer provided permissions on role or storage. *
+	// `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of a
+	// Databricks internal error. Contact support if it doesn't go away soon. *
+	// `NOT_FOUND`: The log delivery status as the configuration has been
+	// disabled since the release of this feature or there are no workspaces in
+	// the account.
+	Status DeliveryStatus `json:"status,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *LogDeliveryStatus) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s LogDeliveryStatus) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
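+
+// Example (editor's sketch, not generated code): inspecting the delivery
+// health of a configuration. The config variable stands in for a
+// LogDeliveryConfiguration returned by the API.
+//
+//	if st := config.LogDeliveryStatus; st != nil {
+//		switch st.Status {
+//		case DeliveryStatusUserFailure:
+//			// Misconfigured role or bucket permissions; st.Message has details.
+//		case DeliveryStatusSystemFailure:
+//			// Databricks-side error; contact support if it persists.
+//		}
+//	}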
+
+// Log delivery type. Supported values are:
+//
+// * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV
+// schema, see the [View billable usage].
+//
+// * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see
+// [Configure audit logging]
+//
+// [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html
+// [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html
+// [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html
+// [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html
+type LogType string
+
+const LogTypeAuditLogs LogType = `AUDIT_LOGS`
+
+const LogTypeBillableUsage LogType = `BILLABLE_USAGE`
+
+// String representation for [fmt.Print]
+func (f *LogType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *LogType) Set(v string) error {
+	switch v {
+	case `AUDIT_LOGS`, `BILLABLE_USAGE`:
+		*f = LogType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AUDIT_LOGS", "BILLABLE_USAGE"`, v)
+	}
+}
+
+// Type always returns LogType to satisfy [pflag.Value] interface
+func (f *LogType) Type() string {
+	return "LogType"
+}
+
+// The file type of log delivery.
+//
+// * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV
+// (comma-separated values) format is supported. For the schema, see the [View
+// billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be `JSON`.
+// Only the JSON (JavaScript Object Notation) format is supported. For the
+// schema, see the [Configuring audit logs].
+//
+// [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html
+// [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html
+type OutputFormat string
+
+const OutputFormatCsv OutputFormat = `CSV`
+
+const OutputFormatJson OutputFormat = `JSON`
+
+// String representation for [fmt.Print]
+func (f *OutputFormat) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *OutputFormat) Set(v string) error {
+	switch v {
+	case `CSV`, `JSON`:
+		*f = OutputFormat(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "CSV", "JSON"`, v)
+	}
+}
+
+// Type always returns OutputFormat to satisfy [pflag.Value] interface
+func (f *OutputFormat) Type() string {
+	return "OutputFormat"
+}
+
+type PatchStatusResponse struct {
+}
+
+type SortSpec struct {
+	// Whether to sort in descending order.
+	Descending bool `json:"descending,omitempty" url:"descending,omitempty"`
+	// The field to sort by.
+	Field SortSpecField `json:"field,omitempty" url:"field,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *SortSpec) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SortSpec) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type SortSpecField string
+
+const SortSpecFieldPolicyName SortSpecField = `POLICY_NAME`
+
+// String representation for [fmt.Print]
+func (f *SortSpecField) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *SortSpecField) Set(v string) error {
+	switch v {
+	case `POLICY_NAME`:
+		*f = SortSpecField(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "POLICY_NAME"`, v)
+	}
+}
+
+// Type always returns SortSpecField to satisfy [pflag.Value] interface
+func (f *SortSpecField) Type() string {
+	return "SortSpecField"
+}
+
+type UpdateBudgetConfigurationBudget struct {
+	// Databricks account ID.
+	AccountId string `json:"account_id,omitempty"`
+	// Alerts to configure when this budget is in a triggered state. Budgets
+	// must have exactly one alert configuration.
+	AlertConfigurations []AlertConfiguration `json:"alert_configurations,omitempty"`
+	// Databricks budget configuration ID.
+	BudgetConfigurationId string `json:"budget_configuration_id,omitempty"`
+	// Human-readable name of budget configuration. Max Length: 128
+	DisplayName string `json:"display_name,omitempty"`
+	// Configured filters for this budget. These are applied to your account's
+	// usage to limit the scope of what is considered for this budget. Leave
+	// empty to include all usage for this account. All provided filters must be
+	// matched for usage to be included.
+	Filter *BudgetConfigurationFilter `json:"filter,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateBudgetConfigurationBudget) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateBudgetConfigurationBudget) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateBudgetConfigurationRequest struct {
+	// The updated budget. This will overwrite the budget specified by the
+	// budget ID.
+	Budget UpdateBudgetConfigurationBudget `json:"budget"`
+	// The Databricks budget configuration ID.
+	BudgetId string `json:"-" url:"-"`
+}
+
+type UpdateBudgetConfigurationResponse struct {
+	// The updated budget.
+	Budget *BudgetConfiguration `json:"budget,omitempty"`
+}
+
+// Update a budget policy
+type UpdateBudgetPolicyRequest struct {
+	// DEPRECATED. This field is redundant, as LimitConfig is part of the
+	// BudgetPolicy.
+	LimitConfig *LimitConfig `json:"-" url:"limit_config,omitempty"`
+	// Contains the BudgetPolicy details.
+	Policy *BudgetPolicy `json:"policy,omitempty"`
+	// The Id of the policy. This field is generated by Databricks and globally
+	// unique.
+	PolicyId string `json:"-" url:"-"`
+}
+
+type UpdateLogDeliveryConfigurationStatusRequest struct {
+	// Databricks log delivery configuration ID
+	LogDeliveryConfigurationId string `json:"-" url:"-"`
+	// Status of log delivery configuration. Set to `ENABLED` (enabled) or
+	// `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable
+	// the configuration](#operation/patch-log-delivery-config-status) later.
+	// Deletion of a configuration is not supported, so disable a log delivery
+	// configuration that is no longer needed.
+	Status LogDeliveryConfigStatus `json:"status"`
+}
+
+type UsageDashboardType string
+
+const UsageDashboardTypeUsageDashboardTypeGlobal UsageDashboardType = `USAGE_DASHBOARD_TYPE_GLOBAL`
+
+const UsageDashboardTypeUsageDashboardTypeWorkspace UsageDashboardType = `USAGE_DASHBOARD_TYPE_WORKSPACE`
+
+// String representation for [fmt.Print]
+func (f *UsageDashboardType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *UsageDashboardType) Set(v string) error {
+	switch v {
+	case `USAGE_DASHBOARD_TYPE_GLOBAL`, `USAGE_DASHBOARD_TYPE_WORKSPACE`:
+		*f = UsageDashboardType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "USAGE_DASHBOARD_TYPE_GLOBAL", "USAGE_DASHBOARD_TYPE_WORKSPACE"`, v)
+	}
+}
+
+// Type always returns UsageDashboardType to satisfy [pflag.Value] interface
+func (f *UsageDashboardType) Type() string {
+	return "UsageDashboardType"
+}
+
+type WrappedCreateLogDeliveryConfiguration struct {
+	LogDeliveryConfiguration *CreateLogDeliveryConfigurationParams `json:"log_delivery_configuration,omitempty"`
+}
+
+type WrappedLogDeliveryConfiguration struct {
+	LogDeliveryConfiguration *LogDeliveryConfiguration `json:"log_delivery_configuration,omitempty"`
+}
+
+type WrappedLogDeliveryConfigurations struct {
+	LogDeliveryConfigurations []LogDeliveryConfiguration `json:"log_delivery_configurations,omitempty"`
+}
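+
+// Example (editor's sketch, not generated code): log delivery configurations
+// cannot be deleted, so retire one by patching its status to `DISABLED`. The
+// configuration ID is a placeholder.
+//
+//	req := UpdateLogDeliveryConfigurationStatusRequest{
+//		LogDeliveryConfigurationId: "<config-id>",
+//		Status:                     LogDeliveryConfigStatusDisabled,
+//	}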
diff --git a/catalog/v2preview/api.go b/catalog/v2preview/api.go
new file mode 100755
index 000000000..e5a848ef1
--- /dev/null
+++ b/catalog/v2preview/api.go
@@ -0,0 +1,3409 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+// These APIs allow you to manage Account Metastore Assignments Preview, Account Metastores Preview, Account Storage Credentials Preview, Artifact Allowlists Preview, Catalogs Preview, Connections Preview, Credentials Preview, External Locations Preview, Functions Preview, Grants Preview, Metastores Preview, Model Versions Preview, Online Tables Preview, Quality Monitors Preview, Registered Models Preview, Resource Quotas Preview, Schemas Preview, Storage Credentials Preview, System Schemas Preview, Table Constraints Preview, Tables Preview, Temporary Table Credentials Preview, Volumes Preview, Workspace Bindings Preview, etc.
+package catalogpreview
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/databricks/client"
+	"github.com/databricks/databricks-sdk-go/databricks/listing"
+	"github.com/databricks/databricks-sdk-go/databricks/useragent"
+)
+
+type AccountMetastoreAssignmentsPreviewInterface interface {
+
+	// Assigns a workspace to a metastore.
+	//
+	// Creates an assignment to a metastore for a workspace
+	Create(ctx context.Context, request AccountsCreateMetastoreAssignment) error
+
+	// Delete a metastore assignment.
+	//
+	// Deletes a metastore assignment to a workspace, leaving the workspace with no
+	// metastore.
+	Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error
+
+	// Delete a metastore assignment.
+	//
+	// Deletes a metastore assignment to a workspace, leaving the workspace with no
+	// metastore.
+	DeleteByWorkspaceIdAndMetastoreId(ctx context.Context, workspaceId int64, metastoreId string) error
+
+	// Gets the metastore assignment for a workspace.
+	//
+	// Gets the metastore assignment, if any, for the workspace specified by ID. If
+	// the workspace is assigned a metastore, the mapping will be returned. If no
+	// metastore is assigned to the workspace, the assignment will not be found and
+	// a 404 is returned.
+	Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error)
+
+	// Gets the metastore assignment for a workspace.
+	//
+	// Gets the metastore assignment, if any, for the workspace specified by ID. If
+	// the workspace is assigned a metastore, the mapping will be returned. If no
+	// metastore is assigned to the workspace, the assignment will not be found and
+	// a 404 is returned.
+	GetByWorkspaceId(ctx context.Context, workspaceId int64) (*AccountsMetastoreAssignment, error)
+
+	// Get all workspaces assigned to a metastore.
+	//
+	// Gets a list of all Databricks workspace IDs that have been assigned to the
+	// given metastore.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64]
+
+	// Get all workspaces assigned to a metastore.
+	//
+	// Gets a list of all Databricks workspace IDs that have been assigned to the
+	// given metastore.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error)
+
+	// Get all workspaces assigned to a metastore.
+	//
+	// Gets a list of all Databricks workspace IDs that have been assigned to the
+	// given metastore.
+	ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountMetastoreAssignmentsResponse, error)
+
+	// Updates a metastore assignment to a workspace.
+	//
+	// Updates an assignment to a metastore for a workspace. Currently, only the
+	// default catalog may be updated.
+	Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error
+}
+
+func NewAccountMetastoreAssignmentsPreview(client *client.DatabricksClient) *AccountMetastoreAssignmentsPreviewAPI {
+	return &AccountMetastoreAssignmentsPreviewAPI{
+		accountMetastoreAssignmentsPreviewImpl: accountMetastoreAssignmentsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// These APIs manage metastore assignments to a workspace.
+type AccountMetastoreAssignmentsPreviewAPI struct {
+	accountMetastoreAssignmentsPreviewImpl
+}
+
+// Delete a metastore assignment.
+//
+// Deletes a metastore assignment to a workspace, leaving the workspace with no
+// metastore.
+func (a *AccountMetastoreAssignmentsPreviewAPI) DeleteByWorkspaceIdAndMetastoreId(ctx context.Context, workspaceId int64, metastoreId string) error {
+	return a.accountMetastoreAssignmentsPreviewImpl.Delete(ctx, DeleteAccountMetastoreAssignmentRequest{
+		WorkspaceId: workspaceId,
+		MetastoreId: metastoreId,
+	})
+}
+
+// Gets the metastore assignment for a workspace.
+//
+// Gets the metastore assignment, if any, for the workspace specified by ID. If
+// the workspace is assigned a metastore, the mapping will be returned. If no
+// metastore is assigned to the workspace, the assignment will not be found and
+// a 404 is returned.
+func (a *AccountMetastoreAssignmentsPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*AccountsMetastoreAssignment, error) {
+	return a.accountMetastoreAssignmentsPreviewImpl.Get(ctx, GetAccountMetastoreAssignmentRequest{
+		WorkspaceId: workspaceId,
+	})
+}
+
+// Get all workspaces assigned to a metastore.
+//
+// Gets a list of all Databricks workspace IDs that have been assigned to the
+// given metastore.
+func (a *AccountMetastoreAssignmentsPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountMetastoreAssignmentsResponse, error) {
+	return a.accountMetastoreAssignmentsPreviewImpl.internalList(ctx, ListAccountMetastoreAssignmentsRequest{
+		MetastoreId: metastoreId,
+	})
+}
+
+type AccountMetastoresPreviewInterface interface {
+
+	// Create metastore.
+	//
+	// Creates a Unity Catalog metastore.
+	Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error)
+
+	// Delete a metastore.
+	//
+	// Deletes a Unity Catalog metastore for an account, both specified by ID.
+	Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error
+
+	// Delete a metastore.
+	//
+	// Deletes a Unity Catalog metastore for an account, both specified by ID.
+	DeleteByMetastoreId(ctx context.Context, metastoreId string) error
+
+	// Get a metastore.
+	//
+	// Gets a Unity Catalog metastore from an account, both specified by ID.
+	Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error)
+
+	// Get a metastore.
+	//
+	// Gets a Unity Catalog metastore from an account, both specified by ID.
+	GetByMetastoreId(ctx context.Context, metastoreId string) (*AccountsMetastoreInfo, error)
+
+	// Get all metastores associated with an account.
+	//
+	// Gets all Unity Catalog metastores associated with an account specified by ID.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context) listing.Iterator[MetastoreInfo]
+
+	// Get all metastores associated with an account.
+	//
+	// Gets all Unity Catalog metastores associated with an account specified by ID.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context) ([]MetastoreInfo, error)
+
+	// Update a metastore.
+	//
+	// Updates an existing Unity Catalog metastore.
+	Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error)
+}
+
+func NewAccountMetastoresPreview(client *client.DatabricksClient) *AccountMetastoresPreviewAPI {
+	return &AccountMetastoresPreviewAPI{
+		accountMetastoresPreviewImpl: accountMetastoresPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// These APIs manage Unity Catalog metastores for an account. A metastore
+// contains catalogs that can be associated with workspaces.
+type AccountMetastoresPreviewAPI struct {
+	accountMetastoresPreviewImpl
+}
+
+// Delete a metastore.
+//
+// Deletes a Unity Catalog metastore for an account, both specified by ID.
+func (a *AccountMetastoresPreviewAPI) DeleteByMetastoreId(ctx context.Context, metastoreId string) error {
+	return a.accountMetastoresPreviewImpl.Delete(ctx, DeleteAccountMetastoreRequest{
+		MetastoreId: metastoreId,
+	})
+}
+
+// Get a metastore.
+//
+// Gets a Unity Catalog metastore from an account, both specified by ID.
+func (a *AccountMetastoresPreviewAPI) GetByMetastoreId(ctx context.Context, metastoreId string) (*AccountsMetastoreInfo, error) {
+	return a.accountMetastoresPreviewImpl.Get(ctx, GetAccountMetastoreRequest{
+		MetastoreId: metastoreId,
+	})
+}
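+
+// Example (editor's sketch, not generated code): wiring up the preview API and
+// fetching a metastore by ID. Obtaining the *client.DatabricksClient (dbClient
+// here) is outside the scope of this file; the metastore ID is a placeholder.
+//
+//	api := NewAccountMetastoresPreview(dbClient)
+//	info, err := api.GetByMetastoreId(ctx, "<metastore-id>")
+//	if err != nil {
+//		return err
+//	}
+//	_ = info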
+
+type AccountStorageCredentialsPreviewInterface interface {
+
+	// Create a storage credential.
+	//
+	// Creates a new storage credential. The request object is specific to the
+	// cloud:
+	//
+	// * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure
+	// credentials * **GcpServiceAccountKey** for GCP credentials.
+	//
+	// The caller must be a metastore admin and have the
+	// **CREATE_STORAGE_CREDENTIAL** privilege on the metastore.
+	Create(ctx context.Context, request AccountsCreateStorageCredential) (*AccountsStorageCredentialInfo, error)
+
+	// Delete a storage credential.
+	//
+	// Deletes a storage credential from the metastore. The caller must be an owner
+	// of the storage credential.
+	Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error
+
+	// Delete a storage credential.
+	//
+	// Deletes a storage credential from the metastore. The caller must be an owner
+	// of the storage credential.
+	DeleteByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) error
+
+	// Gets the named storage credential.
+	//
+	// Gets a storage credential from the metastore. The caller must be a metastore
+	// admin, the owner of the storage credential, or have a level of privilege on
+	// the storage credential.
+	Get(ctx context.Context, request GetAccountStorageCredentialRequest) (*AccountsStorageCredentialInfo, error)
+
+	// Gets the named storage credential.
+	//
+	// Gets a storage credential from the metastore. The caller must be a metastore
+	// admin, the owner of the storage credential, or have a level of privilege on
+	// the storage credential.
+	GetByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) (*AccountsStorageCredentialInfo, error)
+
+	// Get all storage credentials assigned to a metastore.
+	//
+	// Gets a list of all storage credentials that have been assigned to the given
+	// metastore.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo]
+
+	// Get all storage credentials assigned to a metastore.
+	//
+	// Gets a list of all storage credentials that have been assigned to the given
+	// metastore.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error)
+
+	// Get all storage credentials assigned to a metastore.
+	//
+	// Gets a list of all storage credentials that have been assigned to the given
+	// metastore.
+	ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountStorageCredentialsResponse, error)
+
+	// Updates a storage credential.
+	//
+	// Updates a storage credential on the metastore. The caller must be the owner
+	// of the storage credential. If the caller is a metastore admin, only the
+	// __owner__ credential can be changed.
+	Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error)
+}
+
+func NewAccountStorageCredentialsPreview(client *client.DatabricksClient) *AccountStorageCredentialsPreviewAPI {
+	return &AccountStorageCredentialsPreviewAPI{
+		accountStorageCredentialsPreviewImpl: accountStorageCredentialsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// These APIs manage storage credentials for a particular metastore.
+type AccountStorageCredentialsPreviewAPI struct {
+	accountStorageCredentialsPreviewImpl
+}
+
+// Delete a storage credential.
+//
+// Deletes a storage credential from the metastore. The caller must be an owner
+// of the storage credential.
+func (a *AccountStorageCredentialsPreviewAPI) DeleteByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) error {
+	return a.accountStorageCredentialsPreviewImpl.Delete(ctx, DeleteAccountStorageCredentialRequest{
+		MetastoreId:           metastoreId,
+		StorageCredentialName: storageCredentialName,
+	})
+}
+
+// Gets the named storage credential.
+//
+// Gets a storage credential from the metastore. The caller must be a metastore
+// admin, the owner of the storage credential, or have a level of privilege on
+// the storage credential.
+func (a *AccountStorageCredentialsPreviewAPI) GetByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) (*AccountsStorageCredentialInfo, error) {
+	return a.accountStorageCredentialsPreviewImpl.Get(ctx, GetAccountStorageCredentialRequest{
+		MetastoreId:           metastoreId,
+		StorageCredentialName: storageCredentialName,
+	})
+}
+
+// Get all storage credentials assigned to a metastore.
+//
+// Gets a list of all storage credentials that have been assigned to the given
+// metastore.
+func (a *AccountStorageCredentialsPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountStorageCredentialsResponse, error) {
+	return a.accountStorageCredentialsPreviewImpl.internalList(ctx, ListAccountStorageCredentialsRequest{
+		MetastoreId: metastoreId,
+	})
+}
+
+type ArtifactAllowlistsPreviewInterface interface {
+
+	// Get an artifact allowlist.
+	//
+	// Get the artifact allowlist of a certain artifact type. The caller must be a
+	// metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore.
+	Get(ctx context.Context, request GetArtifactAllowlistRequest) (*ArtifactAllowlistInfo, error)
+
+	// Get an artifact allowlist.
+	//
+	// Get the artifact allowlist of a certain artifact type. The caller must be a
+	// metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore.
+	GetByArtifactType(ctx context.Context, artifactType ArtifactType) (*ArtifactAllowlistInfo, error)
+
+	// Set an artifact allowlist.
+	//
+	// Set the artifact allowlist of a certain artifact type. The whole artifact
+	// allowlist is replaced with the new allowlist. The caller must be a metastore
+	// admin or have the **MANAGE ALLOWLIST** privilege on the metastore.
+	Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error)
+}
+
+func NewArtifactAllowlistsPreview(client *client.DatabricksClient) *ArtifactAllowlistsPreviewAPI {
+	return &ArtifactAllowlistsPreviewAPI{
+		artifactAllowlistsPreviewImpl: artifactAllowlistsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// In Databricks Runtime 13.3 and above, you can add libraries and init scripts
+// to the `allowlist` in UC so that users can leverage these artifacts on
+// compute configured with shared access mode.
+type ArtifactAllowlistsPreviewAPI struct {
+	artifactAllowlistsPreviewImpl
+}
+
+// Get an artifact allowlist.
+//
+// Get the artifact allowlist of a certain artifact type. The caller must be a
+// metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore.
+func (a *ArtifactAllowlistsPreviewAPI) GetByArtifactType(ctx context.Context, artifactType ArtifactType) (*ArtifactAllowlistInfo, error) {
+	return a.artifactAllowlistsPreviewImpl.Get(ctx, GetArtifactAllowlistRequest{
+		ArtifactType: artifactType,
+	})
+}
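+
+// Example (editor's sketch, not generated code): reading the allowlist for one
+// artifact type. ArtifactTypeInitScript is assumed to be among the generated
+// ArtifactType constants in this package's model; dbClient is a
+// *client.DatabricksClient obtained elsewhere.
+//
+//	api := NewArtifactAllowlistsPreview(dbClient)
+//	allowlist, err := api.GetByArtifactType(ctx, ArtifactTypeInitScript)
+//	if err != nil {
+//		return err
+//	}
+//	_ = allowlist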
+
+type CatalogsPreviewInterface interface {
+
+	// Create a catalog.
+	//
+	// Creates a new catalog instance in the parent metastore if the caller is a
+	// metastore admin or has the **CREATE_CATALOG** privilege.
+	Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error)
+
+	// Delete a catalog.
+	//
+	// Deletes the catalog that matches the supplied name. The caller must be a
+	// metastore admin or the owner of the catalog.
+	Delete(ctx context.Context, request DeleteCatalogRequest) error
+
+	// Delete a catalog.
+	//
+	// Deletes the catalog that matches the supplied name. The caller must be a
+	// metastore admin or the owner of the catalog.
+	DeleteByName(ctx context.Context, name string) error
+
+	// Get a catalog.
+	//
+	// Gets the specified catalog in a metastore. The caller must be a metastore
+	// admin, the owner of the catalog, or a user that has the **USE_CATALOG**
+	// privilege set for their account.
+	Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error)
+
+	// Get a catalog.
+	//
+	// Gets the specified catalog in a metastore. The caller must be a metastore
+	// admin, the owner of the catalog, or a user that has the **USE_CATALOG**
+	// privilege set for their account.
+	GetByName(ctx context.Context, name string) (*CatalogInfo, error)
+
+	// List catalogs.
+	//
+	// Gets an array of catalogs in the metastore. If the caller is the metastore
+	// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the
+	// caller (or for which the caller has the **USE_CATALOG** privilege) will be
+	// retrieved. There is no guarantee of a specific ordering of the elements in
+	// the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo]
+
+	// List catalogs.
+	//
+	// Gets an array of catalogs in the metastore. If the caller is the metastore
+	// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the
+	// caller (or for which the caller has the **USE_CATALOG** privilege) will be
+	// retrieved. There is no guarantee of a specific ordering of the elements in
+	// the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error)
+
+	// Update a catalog.
+	//
+	// Updates the catalog that matches the supplied name. The caller must be either
+	// the owner of the catalog, or a metastore admin (when changing the owner field
+	// of the catalog).
+	Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error)
+}
+
+func NewCatalogsPreview(client *client.DatabricksClient) *CatalogsPreviewAPI {
+	return &CatalogsPreviewAPI{
+		catalogsPreviewImpl: catalogsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// A catalog is the first layer of Unity Catalog’s three-level namespace.
+// It’s used to organize your data assets. Users can see all catalogs on which
+// they have been assigned the USE_CATALOG data permission.
+//
+// In Unity Catalog, admins and data stewards manage users and their access to
+// data centrally across all of the workspaces in a Databricks account. Users in
+// different workspaces can share access to the same data, depending on
+// privileges granted centrally in Unity Catalog.
+type CatalogsPreviewAPI struct {
+	catalogsPreviewImpl
+}
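+
+// Example (editor's sketch, not generated code): creating a catalog. The
+// CreateCatalog request type lives in this package's model; only its Name
+// field is assumed here, and dbClient is a *client.DatabricksClient obtained
+// elsewhere.
+//
+//	api := NewCatalogsPreview(dbClient)
+//	catalog, err := api.Create(ctx, CreateCatalog{Name: "main"})
+//	if err != nil {
+//		return err
+//	}
+//	_ = catalog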
+
+// Delete a catalog.
+//
+// Deletes the catalog that matches the supplied name. The caller must be a
+// metastore admin or the owner of the catalog.
+func (a *CatalogsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.catalogsPreviewImpl.Delete(ctx, DeleteCatalogRequest{
+		Name: name,
+	})
+}
+
+// Get a catalog.
+//
+// Gets the specified catalog in a metastore. The caller must be a metastore
+// admin, the owner of the catalog, or a user that has the **USE_CATALOG**
+// privilege set for their account.
+func (a *CatalogsPreviewAPI) GetByName(ctx context.Context, name string) (*CatalogInfo, error) {
+	return a.catalogsPreviewImpl.Get(ctx, GetCatalogRequest{
+		Name: name,
+	})
+}
+
+type ConnectionsPreviewInterface interface {
+
+	// Create a connection.
+	//
+	// Creates a new connection
+	//
+	// Creates a new connection to an external data source. It allows users to
+	// specify connection details and configurations for interaction with the
+	// external server.
+	Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error)
+
+	// Delete a connection.
+	//
+	// Deletes the connection that matches the supplied name.
+	Delete(ctx context.Context, request DeleteConnectionRequest) error
+
+	// Delete a connection.
+	//
+	// Deletes the connection that matches the supplied name.
+	DeleteByName(ctx context.Context, name string) error
+
+	// Get a connection.
+	//
+	// Gets a connection from its name.
+	Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error)
+
+	// Get a connection.
+	//
+	// Gets a connection from its name.
+	GetByName(ctx context.Context, name string) (*ConnectionInfo, error)
+
+	// List connections.
+	//
+	// List all connections.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo]
+
+	// List connections.
+	//
+	// List all connections.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error)
+
+	// ConnectionInfoNameToFullNameMap calls [ConnectionsPreviewAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value.
+	//
+	// Returns an error if there's more than one [ConnectionInfo] with the same .Name.
+	//
+	// Note: All [ConnectionInfo] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ConnectionInfoNameToFullNameMap(ctx context.Context, request ListConnectionsRequest) (map[string]string, error)
+
+	// Update a connection.
+	//
+	// Updates the connection that matches the supplied name.
+	Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error)
+}
+
+func NewConnectionsPreview(client *client.DatabricksClient) *ConnectionsPreviewAPI {
+	return &ConnectionsPreviewAPI{
+		connectionsPreviewImpl: connectionsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Connections allow for creating a connection to an external data source.
+//
+// A connection is an abstraction of an external data source that can be
+// connected from Databricks Compute. Creating a connection object is the first
+// step to managing external data sources within Unity Catalog, with the second
+// step being creating a data object (catalog, schema, or table) using the
+// connection. Data objects derived from a connection can be written to or read
+// from similarly to other Unity Catalog data objects based on cloud storage.
+// Users may create different types of connections with each connection having a
+// unique set of configuration options to support credential management and
+// other settings.
+type ConnectionsPreviewAPI struct {
+	connectionsPreviewImpl
+}
+
+// Delete a connection.
+//
+// Deletes the connection that matches the supplied name.
+func (a *ConnectionsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.connectionsPreviewImpl.Delete(ctx, DeleteConnectionRequest{
+		Name: name,
+	})
+}
+
+// Get a connection.
+//
+// Gets a connection from its name.
+func (a *ConnectionsPreviewAPI) GetByName(ctx context.Context, name string) (*ConnectionInfo, error) {
+	return a.connectionsPreviewImpl.Get(ctx, GetConnectionRequest{
+		Name: name,
+	})
+}
+
+// ConnectionInfoNameToFullNameMap calls [ConnectionsPreviewAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value.
+//
+// Returns an error if there's more than one [ConnectionInfo] with the same .Name.
+//
+// Note: All [ConnectionInfo] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *ConnectionsPreviewAPI) ConnectionInfoNameToFullNameMap(ctx context.Context, request ListConnectionsRequest) (map[string]string, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
+	mapping := map[string]string{}
+	result, err := a.ListAll(ctx, request)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range result {
+		key := v.Name
+		_, duplicate := mapping[key]
+		if duplicate {
+			return nil, fmt.Errorf("duplicate .Name: %s", key)
+		}
+		mapping[key] = v.FullName
+	}
+	return mapping, nil
+}
+
+type CredentialsPreviewInterface interface {
+
+	// Create a credential.
+	//
+	// Creates a new credential. The type of credential to be created is determined
+	// by the **purpose** field, which should be either **SERVICE** or **STORAGE**.
+	//
+	// The caller must be a metastore admin or have the metastore privilege
+	// **CREATE_STORAGE_CREDENTIAL** for storage credentials, or
+	// **CREATE_SERVICE_CREDENTIAL** for service credentials.
+	CreateCredential(ctx context.Context, request CreateCredentialRequest) (*CredentialInfo, error)
+
+	// Delete a credential.
+	//
+	// Deletes a service or storage credential from the metastore. The caller must
+	// be an owner of the credential.
+	DeleteCredential(ctx context.Context, request DeleteCredentialRequest) error
+
+	// Delete a credential.
+	//
+	// Deletes a service or storage credential from the metastore. The caller must
+	// be an owner of the credential.
+	DeleteCredentialByNameArg(ctx context.Context, nameArg string) error
+
+	// Generate a temporary service credential.
+	//
+	// Returns a set of temporary credentials generated using the specified service
+	// credential. The caller must be a metastore admin or have the metastore
+	// privilege **ACCESS** on the service credential.
+	GenerateTemporaryServiceCredential(ctx context.Context, request GenerateTemporaryServiceCredentialRequest) (*TemporaryCredentials, error)
+
+	// Get a credential.
+	//
+	// Gets a service or storage credential from the metastore. The caller must be a
+	// metastore admin, the owner of the credential, or have any permission on the
+	// credential.
+	GetCredential(ctx context.Context, request GetCredentialRequest) (*CredentialInfo, error)
+
+	// Get a credential.
+	//
+	// Gets a service or storage credential from the metastore. The caller must be a
+	// metastore admin, the owner of the credential, or have any permission on the
+	// credential.
+	GetCredentialByNameArg(ctx context.Context, nameArg string) (*CredentialInfo, error)
+
+	// List credentials.
+	//
+	// Gets an array of credentials (as __CredentialInfo__ objects).
+	//
+	// The array is limited to only the credentials that the caller has permission
+	// to access. If the caller is a metastore admin, retrieval of credentials is
+	// unrestricted. There is no guarantee of a specific ordering of the elements in
+	// the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo]
+
+	// List credentials.
+	//
+	// Gets an array of credentials (as __CredentialInfo__ objects).
+	//
+	// The array is limited to only the credentials that the caller has permission
+	// to access. If the caller is a metastore admin, retrieval of credentials is
+	// unrestricted. There is no guarantee of a specific ordering of the elements in
+	// the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error)
+
+	// Update a credential.
+	//
+	// Updates a service or storage credential on the metastore.
+	//
+	// The caller must be the owner of the credential or a metastore admin or have
+	// the `MANAGE` permission. If the caller is a metastore admin, only the
+	// __owner__ field can be changed.
+	UpdateCredential(ctx context.Context, request UpdateCredentialRequest) (*CredentialInfo, error)
+
+	// Validate a credential.
+	//
+	// Validates a credential.
+	//
+	// For service credentials (purpose is **SERVICE**), either the
+	// __credential_name__ or the cloud-specific credential must be provided.
+	//
+	// For storage credentials (purpose is **STORAGE**), at least one of
+	// __external_location_name__ and __url__ needs to be provided. If only one of
+	// them is provided, it will be used for validation. If both are provided,
+	// the __url__ will be used for validation, and __external_location_name__ will
+	// be ignored when checking overlapping urls. Either the __credential_name__ or
+	// the cloud-specific credential must be provided.
+	//
+	// The caller must be a metastore admin or the credential owner or have the
+	// required permission on the metastore and the credential (e.g.,
+	// **CREATE_EXTERNAL_LOCATION** when purpose is **STORAGE**).
+	ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error)
+}
+
+func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewAPI {
+	return &CredentialsPreviewAPI{
+		credentialsPreviewImpl: credentialsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// A credential represents an authentication and authorization mechanism for
+// accessing services on your cloud tenant. Each credential is subject to Unity
+// Catalog access-control policies that control which users and groups can
+// access the credential.
+//
+// To create credentials, you must be a Databricks account admin or have the
+// `CREATE SERVICE CREDENTIAL` privilege. The user who creates the credential
+// can delegate ownership to another user or group to manage permissions on it.
+type CredentialsPreviewAPI struct {
+	credentialsPreviewImpl
+}
+
+// Delete a credential.
+//
+// Deletes a service or storage credential from the metastore.
The caller must +// be an owner of the credential. +func (a *CredentialsPreviewAPI) DeleteCredentialByNameArg(ctx context.Context, nameArg string) error { + return a.credentialsPreviewImpl.DeleteCredential(ctx, DeleteCredentialRequest{ + NameArg: nameArg, + }) +} + +// Get a credential. +// +// Gets a service or storage credential from the metastore. The caller must be a +// metastore admin, the owner of the credential, or have any permission on the +// credential. +func (a *CredentialsPreviewAPI) GetCredentialByNameArg(ctx context.Context, nameArg string) (*CredentialInfo, error) { + return a.credentialsPreviewImpl.GetCredential(ctx, GetCredentialRequest{ + NameArg: nameArg, + }) +} + +type ExternalLocationsPreviewInterface interface { + + // Create an external location. + // + // Creates a new external location entry in the metastore. The caller must be a + // metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both + // the metastore and the associated storage credential. + Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error) + + // Delete an external location. + // + // Deletes the specified external location from the metastore. The caller must + // be the owner of the external location. + Delete(ctx context.Context, request DeleteExternalLocationRequest) error + + // Delete an external location. + // + // Deletes the specified external location from the metastore. The caller must + // be the owner of the external location. + DeleteByName(ctx context.Context, name string) error + + // Get an external location. + // + // Gets an external location from the metastore. The caller must be either a + // metastore admin, the owner of the external location, or a user that has some + // privilege on the external location. + Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error) + + // Get an external location. + // + // Gets an external location from the metastore. The caller must be either a + // metastore admin, the owner of the external location, or a user that has some + // privilege on the external location. + GetByName(ctx context.Context, name string) (*ExternalLocationInfo, error) + + // List external locations. + // + // Gets an array of external locations (__ExternalLocationInfo__ objects) from + // the metastore. The caller must be a metastore admin, the owner of the + // external location, or a user that has some privilege on the external + // location. There is no guarantee of a specific ordering of the elements in the + // array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] + + // List external locations. + // + // Gets an array of external locations (__ExternalLocationInfo__ objects) from + // the metastore. The caller must be a metastore admin, the owner of the + // external location, or a user that has some privilege on the external + // location. There is no guarantee of a specific ordering of the elements in the + // array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) + + // Update an external location. + // + // Updates an external location in the metastore. The caller must be the owner + // of the external location, or be a metastore admin. In the second case, the + // admin can only update the name of the external location. 
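+	//
+	// Editor's note: an illustrative usage sketch, not generated code. It assumes
+	// UpdateExternalLocation exposes Name and NewName as in the stable catalog
+	// package; the location names below are placeholders:
+	//
+	//	loc, err := a.Update(ctx, UpdateExternalLocation{
+	//		Name:    "old_location_name",
+	//		NewName: "new_location_name",
+	//	})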
+ Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) +} + +func NewExternalLocationsPreview(client *client.DatabricksClient) *ExternalLocationsPreviewAPI { + return &ExternalLocationsPreviewAPI{ + externalLocationsPreviewImpl: externalLocationsPreviewImpl{ + client: client, + }, + } +} + +// An external location is an object that combines a cloud storage path with a +// storage credential that authorizes access to the cloud storage path. Each +// external location is subject to Unity Catalog access-control policies that +// control which users and groups can access the credential. If a user does not +// have access to an external location in Unity Catalog, the request fails and +// Unity Catalog does not attempt to authenticate to your cloud tenant on the +// user’s behalf. +// +// Databricks recommends using external locations rather than using storage +// credentials directly. +// +// To create external locations, you must be a metastore admin or a user with +// the **CREATE_EXTERNAL_LOCATION** privilege. +type ExternalLocationsPreviewAPI struct { + externalLocationsPreviewImpl +} + +// Delete an external location. +// +// Deletes the specified external location from the metastore. The caller must +// be the owner of the external location. +func (a *ExternalLocationsPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.externalLocationsPreviewImpl.Delete(ctx, DeleteExternalLocationRequest{ + Name: name, + }) +} + +// Get an external location. +// +// Gets an external location from the metastore. The caller must be either a +// metastore admin, the owner of the external location, or a user that has some +// privilege on the external location. +func (a *ExternalLocationsPreviewAPI) GetByName(ctx context.Context, name string) (*ExternalLocationInfo, error) { + return a.externalLocationsPreviewImpl.Get(ctx, GetExternalLocationRequest{ + Name: name, + }) +} + +type FunctionsPreviewInterface interface { + + // Create a function. + // + // **WARNING: This API is experimental and will change in future versions** + // + // Creates a new function + // + // The user must have the following permissions in order for the function to be + // created: - **USE_CATALOG** on the function's parent catalog - **USE_SCHEMA** + // and **CREATE_FUNCTION** on the function's parent schema + Create(ctx context.Context, request CreateFunctionRequest) (*FunctionInfo, error) + + // Delete a function. + // + // Deletes the function that matches the supplied name. For the deletion to + // succeed, the user must satisfy one of the following conditions: - Is the + // owner of the function's parent catalog - Is the owner of the function's + // parent schema and have the **USE_CATALOG** privilege on its parent catalog - + // Is the owner of the function itself and have both the **USE_CATALOG** + // privilege on its parent catalog and the **USE_SCHEMA** privilege on its + // parent schema + Delete(ctx context.Context, request DeleteFunctionRequest) error + + // Delete a function. + // + // Deletes the function that matches the supplied name. 
For the deletion to + // succeed, the user must satisfy one of the following conditions: - Is the + // owner of the function's parent catalog - Is the owner of the function's + // parent schema and have the **USE_CATALOG** privilege on its parent catalog - + // Is the owner of the function itself and have both the **USE_CATALOG** + // privilege on its parent catalog and the **USE_SCHEMA** privilege on its + // parent schema + DeleteByName(ctx context.Context, name string) error + + // Get a function. + // + // Gets a function from within a parent catalog and schema. For the fetch to + // succeed, the user must satisfy one of the following requirements: - Is a + // metastore admin - Is an owner of the function's parent catalog - Have the + // **USE_CATALOG** privilege on the function's parent catalog and be the owner + // of the function - Have the **USE_CATALOG** privilege on the function's parent + // catalog, the **USE_SCHEMA** privilege on the function's parent schema, and + // the **EXECUTE** privilege on the function itself + Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error) + + // Get a function. + // + // Gets a function from within a parent catalog and schema. For the fetch to + // succeed, the user must satisfy one of the following requirements: - Is a + // metastore admin - Is an owner of the function's parent catalog - Have the + // **USE_CATALOG** privilege on the function's parent catalog and be the owner + // of the function - Have the **USE_CATALOG** privilege on the function's parent + // catalog, the **USE_SCHEMA** privilege on the function's parent schema, and + // the **EXECUTE** privilege on the function itself + GetByName(ctx context.Context, name string) (*FunctionInfo, error) + + // List functions. + // + // List functions within the specified parent catalog and schema. If the user is + // a metastore admin, all functions are returned in the output list. Otherwise, + // the user must have the **USE_CATALOG** privilege on the catalog and the + // **USE_SCHEMA** privilege on the schema, and the output list contains only + // functions for which either the user has the **EXECUTE** privilege or the user + // is the owner. There is no guarantee of a specific ordering of the elements in + // the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] + + // List functions. + // + // List functions within the specified parent catalog and schema. If the user is + // a metastore admin, all functions are returned in the output list. Otherwise, + // the user must have the **USE_CATALOG** privilege on the catalog and the + // **USE_SCHEMA** privilege on the schema, and the output list contains only + // functions for which either the user has the **EXECUTE** privilege or the user + // is the owner. There is no guarantee of a specific ordering of the elements in + // the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) + + // FunctionInfoNameToFullNameMap calls [FunctionsPreviewAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value. + // + // Returns an error if there's more than one [FunctionInfo] with the same .Name. + // + // Note: All [FunctionInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
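+	//
+	// Editor's note: an illustrative usage sketch, not generated code. It assumes
+	// ListFunctionsRequest exposes CatalogName and SchemaName as in the stable
+	// catalog package; "main" and "default" are placeholders:
+	//
+	//	names, err := a.FunctionInfoNameToFullNameMap(ctx, ListFunctionsRequest{
+	//		CatalogName: "main",
+	//		SchemaName:  "default",
+	//	})
+	//	// names["my_func"] == "main.default.my_func"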
+	FunctionInfoNameToFullNameMap(ctx context.Context, request ListFunctionsRequest) (map[string]string, error)
+
+	// Update a function.
+	//
+	// Updates the function that matches the supplied name. Only the __owner__
+	// field of the function can be updated. If the user is not a metastore admin,
+	// the user must be a member of the group that is the new function owner. In
+	// addition, the user must satisfy one of the following conditions: - Is a
+	// metastore admin - Is the owner of the function's parent catalog - Is the
+	// owner of the function's parent schema and has the **USE_CATALOG** privilege
+	// on its parent catalog - Is the owner of the function itself and has the
+	// **USE_CATALOG** privilege on its parent catalog as well as the
+	// **USE_SCHEMA** privilege on the function's parent schema.
+	Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error)
+}
+
+func NewFunctionsPreview(client *client.DatabricksClient) *FunctionsPreviewAPI {
+	return &FunctionsPreviewAPI{
+		functionsPreviewImpl: functionsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Functions implement User-Defined Functions (UDFs) in Unity Catalog.
+//
+// The function implementation can be any SQL expression or query, and it can be
+// invoked wherever a table reference is allowed in a query. In Unity Catalog, a
+// function resides at the same level as a table, so it can be referenced with
+// the form __catalog_name__.__schema_name__.__function_name__.
+type FunctionsPreviewAPI struct {
+	functionsPreviewImpl
+}
+
+// Delete a function.
+//
+// Deletes the function that matches the supplied name. For the deletion to
+// succeed, the user must satisfy one of the following conditions: - Is the
+// owner of the function's parent catalog - Is the owner of the function's
+// parent schema and have the **USE_CATALOG** privilege on its parent catalog -
+// Is the owner of the function itself and have both the **USE_CATALOG**
+// privilege on its parent catalog and the **USE_SCHEMA** privilege on its
+// parent schema
+func (a *FunctionsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.functionsPreviewImpl.Delete(ctx, DeleteFunctionRequest{
+		Name: name,
+	})
+}
+
+// Get a function.
+//
+// Gets a function from within a parent catalog and schema. For the fetch to
+// succeed, the user must satisfy one of the following requirements: - Is a
+// metastore admin - Is an owner of the function's parent catalog - Have the
+// **USE_CATALOG** privilege on the function's parent catalog and be the owner
+// of the function - Have the **USE_CATALOG** privilege on the function's parent
+// catalog, the **USE_SCHEMA** privilege on the function's parent schema, and
+// the **EXECUTE** privilege on the function itself
+func (a *FunctionsPreviewAPI) GetByName(ctx context.Context, name string) (*FunctionInfo, error) {
+	return a.functionsPreviewImpl.Get(ctx, GetFunctionRequest{
+		Name: name,
+	})
+}
+
+// FunctionInfoNameToFullNameMap calls [FunctionsPreviewAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value.
+//
+// Returns an error if there's more than one [FunctionInfo] with the same .Name.
+//
+// Note: All [FunctionInfo] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *FunctionsPreviewAPI) FunctionInfoNameToFullNameMap(ctx context.Context, request ListFunctionsRequest) (map[string]string, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
+	mapping := map[string]string{}
+	result, err := a.ListAll(ctx, request)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range result {
+		key := v.Name
+		_, duplicate := mapping[key]
+		if duplicate {
+			return nil, fmt.Errorf("duplicate .Name: %s", key)
+		}
+		mapping[key] = v.FullName
+	}
+	return mapping, nil
+}
+
+type GrantsPreviewInterface interface {
+
+	// Get permissions.
+	//
+	// Gets the permissions for a securable.
+	Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error)
+
+	// Get permissions.
+	//
+	// Gets the permissions for a securable.
+	GetBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*PermissionsList, error)
+
+	// Get effective permissions.
+	//
+	// Gets the effective permissions for a securable.
+	GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error)
+
+	// Get effective permissions.
+	//
+	// Gets the effective permissions for a securable.
+	GetEffectiveBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*EffectivePermissionsList, error)
+
+	// Update permissions.
+	//
+	// Updates the permissions for a securable.
+	Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error)
+}
+
+func NewGrantsPreview(client *client.DatabricksClient) *GrantsPreviewAPI {
+	return &GrantsPreviewAPI{
+		grantsPreviewImpl: grantsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// In Unity Catalog, data is secure by default. Initially, users have no access
+// to data in a metastore. Access can be granted by either a metastore admin,
+// the owner of an object, or the owner of the catalog or schema that contains
+// the object.
+//
+// Securable objects in Unity Catalog are hierarchical and privileges are
+// inherited downward. This means that granting a privilege on the catalog
+// automatically grants the privilege to all current and future objects within
+// the catalog. Similarly, privileges granted on a schema are inherited by all
+// current and future objects within that schema.
+type GrantsPreviewAPI struct {
+	grantsPreviewImpl
+}
+
+// Get permissions.
+//
+// Gets the permissions for a securable.
+func (a *GrantsPreviewAPI) GetBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*PermissionsList, error) {
+	return a.grantsPreviewImpl.Get(ctx, GetGrantRequest{
+		SecurableType: securableType,
+		FullName:      fullName,
+	})
+}
+
+// Get effective permissions.
+//
+// Gets the effective permissions for a securable.
+func (a *GrantsPreviewAPI) GetEffectiveBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*EffectivePermissionsList, error) {
+	return a.grantsPreviewImpl.GetEffective(ctx, GetEffectiveRequest{
+		SecurableType: securableType,
+		FullName:      fullName,
+	})
+}
+
+type MetastoresPreviewInterface interface {
+
+	// Create an assignment.
+	//
+	// Creates a new metastore assignment. If an assignment for the same
+	// __workspace_id__ exists, it will be overwritten by the new __metastore_id__
+	// and __default_catalog_name__. The caller must be an account admin.
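+	//
+	// Editor's note: an illustrative usage sketch, not generated code. It assumes
+	// CreateMetastoreAssignment carries WorkspaceId, MetastoreId, and
+	// DefaultCatalogName as in the stable catalog package; all values below are
+	// placeholders:
+	//
+	//	err := a.Assign(ctx, CreateMetastoreAssignment{
+	//		WorkspaceId:        1234567890,
+	//		MetastoreId:        "12a345b6-7890-1cd2-3456-e789f0a12b34",
+	//		DefaultCatalogName: "main",
+	//	})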
+ Assign(ctx context.Context, request CreateMetastoreAssignment) error + + // Create a metastore. + // + // Creates a new metastore based on a provided name and optional storage root + // path. By default (if the __owner__ field is not set), the owner of the new + // metastore is the user calling the __createMetastore__ API. If the __owner__ + // field is set to the empty string (**""**), the ownership is assigned to the + // System User instead. + Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error) + + // Get metastore assignment for workspace. + // + // Gets the metastore assignment for the workspace being accessed. + Current(ctx context.Context) (*MetastoreAssignment, error) + + // Delete a metastore. + // + // Deletes a metastore. The caller must be a metastore admin. + Delete(ctx context.Context, request DeleteMetastoreRequest) error + + // Delete a metastore. + // + // Deletes a metastore. The caller must be a metastore admin. + DeleteById(ctx context.Context, id string) error + + // Get a metastore. + // + // Gets a metastore that matches the supplied ID. The caller must be a metastore + // admin to retrieve this info. + Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error) + + // Get a metastore. + // + // Gets a metastore that matches the supplied ID. The caller must be a metastore + // admin to retrieve this info. + GetById(ctx context.Context, id string) (*MetastoreInfo, error) + + // List metastores. + // + // Gets an array of the available metastores (as __MetastoreInfo__ objects). The + // caller must be an admin to retrieve this info. There is no guarantee of a + // specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[MetastoreInfo] + + // List metastores. + // + // Gets an array of the available metastores (as __MetastoreInfo__ objects). The + // caller must be an admin to retrieve this info. There is no guarantee of a + // specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]MetastoreInfo, error) + + // MetastoreInfoNameToMetastoreIdMap calls [MetastoresPreviewAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value. + // + // Returns an error if there's more than one [MetastoreInfo] with the same .Name. + // + // Note: All [MetastoreInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error) + + // GetByName calls [MetastoresPreviewAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo]. + // + // Returns an error if there's more than one [MetastoreInfo] with the same .Name. + // + // Note: All [MetastoreInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*MetastoreInfo, error) + + // Get a metastore summary. + // + // Gets information about a metastore. This summary includes the storage + // credential, the cloud vendor, the cloud region, and the global metastore ID. + Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error) + + // Delete an assignment. + // + // Deletes a metastore assignment. The caller must be an account administrator. 
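+	//
+	// Editor's note: an illustrative usage sketch, not generated code; the
+	// workspace ID below is a placeholder:
+	//
+	//	err := a.Unassign(ctx, UnassignRequest{
+	//		WorkspaceId: 1234567890,
+	//	})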
+ Unassign(ctx context.Context, request UnassignRequest) error + + // Delete an assignment. + // + // Deletes a metastore assignment. The caller must be an account administrator. + UnassignByWorkspaceId(ctx context.Context, workspaceId int64) error + + // Update a metastore. + // + // Updates information for a specific metastore. The caller must be a metastore + // admin. If the __owner__ field is set to the empty string (**""**), the + // ownership is updated to the System User. + Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error) + + // Update an assignment. + // + // Updates a metastore assignment. This operation can be used to update + // __metastore_id__ or __default_catalog_name__ for a specified Workspace, if + // the Workspace is already assigned a metastore. The caller must be an account + // admin to update __metastore_id__; otherwise, the caller can be a Workspace + // admin. + UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error +} + +func NewMetastoresPreview(client *client.DatabricksClient) *MetastoresPreviewAPI { + return &MetastoresPreviewAPI{ + metastoresPreviewImpl: metastoresPreviewImpl{ + client: client, + }, + } +} + +// A metastore is the top-level container of objects in Unity Catalog. It stores +// data assets (tables and views) and the permissions that govern access to +// them. Databricks account admins can create metastores and assign them to +// Databricks workspaces to control which workloads use each metastore. For a +// workspace to use Unity Catalog, it must have a Unity Catalog metastore +// attached. +// +// Each metastore is configured with a root storage location in a cloud storage +// account. This storage location is used for metadata and managed tables data. +// +// NOTE: This metastore is distinct from the metastore included in Databricks +// workspaces created before Unity Catalog was released. If your workspace +// includes a legacy Hive metastore, the data in that metastore is available in +// a catalog named hive_metastore. +type MetastoresPreviewAPI struct { + metastoresPreviewImpl +} + +// Delete a metastore. +// +// Deletes a metastore. The caller must be a metastore admin. +func (a *MetastoresPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.metastoresPreviewImpl.Delete(ctx, DeleteMetastoreRequest{ + Id: id, + }) +} + +// Get a metastore. +// +// Gets a metastore that matches the supplied ID. The caller must be a metastore +// admin to retrieve this info. +func (a *MetastoresPreviewAPI) GetById(ctx context.Context, id string) (*MetastoreInfo, error) { + return a.metastoresPreviewImpl.Get(ctx, GetMetastoreRequest{ + Id: id, + }) +} + +// MetastoreInfoNameToMetastoreIdMap calls [MetastoresPreviewAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value. +// +// Returns an error if there's more than one [MetastoreInfo] with the same .Name. +// +// Note: All [MetastoreInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
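+//
+// Editor's note: an illustrative usage sketch, not generated code; the
+// metastore name below is a placeholder:
+//
+//	ids, err := a.MetastoreInfoNameToMetastoreIdMap(ctx)
+//	// ids["primary-metastore"] holds the ID of the metastore with that name.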
+func (a *MetastoresPreviewAPI) MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.MetastoreId + } + return mapping, nil +} + +// GetByName calls [MetastoresPreviewAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo]. +// +// Returns an error if there's more than one [MetastoreInfo] with the same .Name. +// +// Note: All [MetastoreInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *MetastoresPreviewAPI) GetByName(ctx context.Context, name string) (*MetastoreInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]MetastoreInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("MetastoreInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of MetastoreInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Delete an assignment. +// +// Deletes a metastore assignment. The caller must be an account administrator. +func (a *MetastoresPreviewAPI) UnassignByWorkspaceId(ctx context.Context, workspaceId int64) error { + return a.metastoresPreviewImpl.Unassign(ctx, UnassignRequest{ + WorkspaceId: workspaceId, + }) +} + +type ModelVersionsPreviewInterface interface { + + // Delete a Model Version. + // + // Deletes a model version from the specified registered model. Any aliases + // assigned to the model version will also be deleted. + // + // The caller must be a metastore admin or an owner of the parent registered + // model. For the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + Delete(ctx context.Context, request DeleteModelVersionRequest) error + + // Delete a Model Version. + // + // Deletes a model version from the specified registered model. Any aliases + // assigned to the model version will also be deleted. + // + // The caller must be a metastore admin or an owner of the parent registered + // model. For the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + DeleteByFullNameAndVersion(ctx context.Context, fullName string, version int) error + + // Get a Model Version. + // + // Get a model version. + // + // The caller must be a metastore admin or an owner of (or have the **EXECUTE** + // privilege on) the parent registered model. For the latter case, the caller + // must also be the owner or have the **USE_CATALOG** privilege on the parent + // catalog and the **USE_SCHEMA** privilege on the parent schema. + Get(ctx context.Context, request GetModelVersionRequest) (*ModelVersionInfo, error) + + // Get a Model Version. + // + // Get a model version. 
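+	//
+	// Editor's note: an illustrative usage sketch, not generated code; the model
+	// name and version below are placeholders:
+	//
+	//	mv, err := a.GetByFullNameAndVersion(ctx, "main.default.my_model", 1)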
+	//
+	// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+	// privilege on) the parent registered model. For the latter case, the caller
+	// must also be the owner or have the **USE_CATALOG** privilege on the parent
+	// catalog and the **USE_SCHEMA** privilege on the parent schema.
+	GetByFullNameAndVersion(ctx context.Context, fullName string, version int) (*ModelVersionInfo, error)
+
+	// Get Model Version By Alias.
+	//
+	// Get a model version by alias.
+	//
+	// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+	// privilege on) the registered model. For the latter case, the caller must also
+	// be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+	// the **USE_SCHEMA** privilege on the parent schema.
+	GetByAlias(ctx context.Context, request GetByAliasRequest) (*ModelVersionInfo, error)
+
+	// Get Model Version By Alias.
+	//
+	// Get a model version by alias.
+	//
+	// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+	// privilege on) the registered model. For the latter case, the caller must also
+	// be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+	// the **USE_SCHEMA** privilege on the parent schema.
+	GetByAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) (*ModelVersionInfo, error)
+
+	// List Model Versions.
+	//
+	// List model versions. You can list model versions under a particular schema,
+	// or list all model versions in the current metastore.
+	//
+	// The returned models are filtered based on the privileges of the calling user.
+	// For example, the metastore admin is able to list all the model versions. A
+	// regular user needs to be the owner or have the **EXECUTE** privilege on the
+	// parent registered model to receive the model versions in the response. For
+	// the latter case, the caller must also be the owner or have the
+	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+	// privilege on the parent schema.
+	//
+	// There is no guarantee of a specific ordering of the elements in the response.
+	// The elements in the response will not contain any aliases or tags.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo]
+
+	// List Model Versions.
+	//
+	// List model versions. You can list model versions under a particular schema,
+	// or list all model versions in the current metastore.
+	//
+	// The returned models are filtered based on the privileges of the calling user.
+	// For example, the metastore admin is able to list all the model versions. A
+	// regular user needs to be the owner or have the **EXECUTE** privilege on the
+	// parent registered model to receive the model versions in the response. For
+	// the latter case, the caller must also be the owner or have the
+	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+	// privilege on the parent schema.
+	//
+	// There is no guarantee of a specific ordering of the elements in the response.
+	// The elements in the response will not contain any aliases or tags.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error)
+
+	// List Model Versions.
+	//
+	// List model versions.
You can list model versions under a particular schema,
+	// or list all model versions in the current metastore.
+	//
+	// The returned models are filtered based on the privileges of the calling user.
+	// For example, the metastore admin is able to list all the model versions. A
+	// regular user needs to be the owner or have the **EXECUTE** privilege on the
+	// parent registered model to receive the model versions in the response. For
+	// the latter case, the caller must also be the owner or have the
+	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+	// privilege on the parent schema.
+	//
+	// There is no guarantee of a specific ordering of the elements in the response.
+	// The elements in the response will not contain any aliases or tags.
+	ListByFullName(ctx context.Context, fullName string) (*ListModelVersionsResponse, error)
+
+	// Update a Model Version.
+	//
+	// Updates the specified model version.
+	//
+	// The caller must be a metastore admin or an owner of the parent registered
+	// model. For the latter case, the caller must also be the owner or have the
+	// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+	// privilege on the parent schema.
+	//
+	// Currently only the comment of the model version can be updated.
+	Update(ctx context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error)
+}
+
+func NewModelVersionsPreview(client *client.DatabricksClient) *ModelVersionsPreviewAPI {
+	return &ModelVersionsPreviewAPI{
+		modelVersionsPreviewImpl: modelVersionsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Databricks provides a hosted version of MLflow Model Registry in Unity
+// Catalog. Models in Unity Catalog provide centralized access control,
+// auditing, lineage, and discovery of ML models across Databricks workspaces.
+//
+// This API reference documents the REST endpoints for managing model versions
+// in Unity Catalog. For more details, see the [registered models API
+// docs](/api/workspace/registeredmodels).
+type ModelVersionsPreviewAPI struct {
+	modelVersionsPreviewImpl
+}
+
+// Delete a Model Version.
+//
+// Deletes a model version from the specified registered model. Any aliases
+// assigned to the model version will also be deleted.
+//
+// The caller must be a metastore admin or an owner of the parent registered
+// model. For the latter case, the caller must also be the owner or have the
+// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+// privilege on the parent schema.
+func (a *ModelVersionsPreviewAPI) DeleteByFullNameAndVersion(ctx context.Context, fullName string, version int) error {
+	return a.modelVersionsPreviewImpl.Delete(ctx, DeleteModelVersionRequest{
+		FullName: fullName,
+		Version:  version,
+	})
+}
+
+// Get a Model Version.
+//
+// Get a model version.
+//
+// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+// privilege on) the parent registered model. For the latter case, the caller
+// must also be the owner or have the **USE_CATALOG** privilege on the parent
+// catalog and the **USE_SCHEMA** privilege on the parent schema.
+func (a *ModelVersionsPreviewAPI) GetByFullNameAndVersion(ctx context.Context, fullName string, version int) (*ModelVersionInfo, error) {
+	return a.modelVersionsPreviewImpl.Get(ctx, GetModelVersionRequest{
+		FullName: fullName,
+		Version:  version,
+	})
+}
+
+// Get Model Version By Alias.
+//
+// Get a model version by alias.
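+//
+// Editor's note: an illustrative usage sketch, not generated code; the model
+// name and alias below are placeholders:
+//
+//	mv, err := a.GetByAliasByFullNameAndAlias(ctx, "main.default.my_model", "champion")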
+//
+// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+// privilege on) the registered model. For the latter case, the caller must also
+// be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+// the **USE_SCHEMA** privilege on the parent schema.
+func (a *ModelVersionsPreviewAPI) GetByAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) (*ModelVersionInfo, error) {
+	return a.modelVersionsPreviewImpl.GetByAlias(ctx, GetByAliasRequest{
+		FullName: fullName,
+		Alias:    alias,
+	})
+}
+
+// List Model Versions.
+//
+// List model versions. You can list model versions under a particular schema,
+// or list all model versions in the current metastore.
+//
+// The returned models are filtered based on the privileges of the calling user.
+// For example, the metastore admin is able to list all the model versions. A
+// regular user needs to be the owner or have the **EXECUTE** privilege on the
+// parent registered model to receive the model versions in the response. For
+// the latter case, the caller must also be the owner or have the
+// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+// privilege on the parent schema.
+//
+// There is no guarantee of a specific ordering of the elements in the response.
+// The elements in the response will not contain any aliases or tags.
+func (a *ModelVersionsPreviewAPI) ListByFullName(ctx context.Context, fullName string) (*ListModelVersionsResponse, error) {
+	return a.modelVersionsPreviewImpl.internalList(ctx, ListModelVersionsRequest{
+		FullName: fullName,
+	})
+}
+
+type OnlineTablesPreviewInterface interface {
+
+	// Create an Online Table.
+	//
+	// Create a new Online Table.
+	Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error)
+
+	// Delete an Online Table.
+	//
+	// Delete an online table. Warning: This will delete all the data in the online
+	// table. If the source Delta table was deleted or modified since this Online
+	// Table was created, this will lose the data forever!
+	Delete(ctx context.Context, request DeleteOnlineTableRequest) error
+
+	// Delete an Online Table.
+	//
+	// Delete an online table. Warning: This will delete all the data in the online
+	// table. If the source Delta table was deleted or modified since this Online
+	// Table was created, this will lose the data forever!
+	DeleteByName(ctx context.Context, name string) error
+
+	// Get an Online Table.
+	//
+	// Get information about an existing online table and its status.
+	Get(ctx context.Context, request GetOnlineTableRequest) (*OnlineTable, error)
+
+	// Get an Online Table.
+	//
+	// Get information about an existing online table and its status.
+	GetByName(ctx context.Context, name string) (*OnlineTable, error)
+}
+
+func NewOnlineTablesPreview(client *client.DatabricksClient) *OnlineTablesPreviewAPI {
+	return &OnlineTablesPreviewAPI{
+		onlineTablesPreviewImpl: onlineTablesPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Online tables provide lower latency and higher QPS access to data from Delta
+// tables.
+type OnlineTablesPreviewAPI struct {
+	onlineTablesPreviewImpl
+}
+
+// Delete an Online Table.
+//
+// Delete an online table. Warning: This will delete all the data in the online
+// table. If the source Delta table was deleted or modified since this Online
+// Table was created, this will lose the data forever!
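+//
+// Editor's note: an illustrative usage sketch, not generated code; the table
+// name below is a placeholder:
+//
+//	err := a.DeleteByName(ctx, "main.default.my_table_online")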
+func (a *OnlineTablesPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.onlineTablesPreviewImpl.Delete(ctx, DeleteOnlineTableRequest{ + Name: name, + }) +} + +// Get an Online Table. +// +// Get information about an existing online table and its status. +func (a *OnlineTablesPreviewAPI) GetByName(ctx context.Context, name string) (*OnlineTable, error) { + return a.onlineTablesPreviewImpl.Get(ctx, GetOnlineTableRequest{ + Name: name, + }) +} + +type QualityMonitorsPreviewInterface interface { + + // Cancel refresh. + // + // Cancel an active monitor refresh for the given refresh ID. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + CancelRefresh(ctx context.Context, request CancelRefreshRequest) error + + // Create a table monitor. + // + // Creates a new monitor for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog, have + // **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on + // the table 2. have **USE_CATALOG** on the table's parent catalog, be an owner + // of the table's parent schema, and have **SELECT** access on the table. 3. + // have the following permissions: - **USE_CATALOG** on the table's parent + // catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the + // table. + // + // Workspace assets, such as the dashboard, will be created in the workspace + // where this call was made. + Create(ctx context.Context, request CreateMonitor) (*MonitorInfo, error) + + // Delete a table monitor. + // + // Deletes a monitor for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + // + // Note that the metric tables and dashboard will not be deleted as part of this + // call; those assets must be manually cleaned up (if desired). + Delete(ctx context.Context, request DeleteQualityMonitorRequest) error + + // Delete a table monitor. + // + // Deletes a monitor for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + // + // Note that the metric tables and dashboard will not be deleted as part of this + // call; those assets must be manually cleaned up (if desired). + DeleteByTableName(ctx context.Context, tableName string) error + + // Get a table monitor. + // + // Gets a monitor for the specified table. 
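+	//
+	// Editor's note: an illustrative usage sketch, not generated code; the table
+	// name below is a placeholder:
+	//
+	//	m, err := a.Get(ctx, GetQualityMonitorRequest{
+	//		TableName: "main.default.my_table",
+	//	})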
+ // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema. 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // The returned information includes configuration values, as well as + // information on assets created by the monitor. Some information (e.g., + // dashboard) may be filtered out if the caller is in a different workspace than + // where the monitor was created. + Get(ctx context.Context, request GetQualityMonitorRequest) (*MonitorInfo, error) + + // Get a table monitor. + // + // Gets a monitor for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema. 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // The returned information includes configuration values, as well as + // information on assets created by the monitor. Some information (e.g., + // dashboard) may be filtered out if the caller is in a different workspace than + // where the monitor was created. + GetByTableName(ctx context.Context, tableName string) (*MonitorInfo, error) + + // Get refresh. + // + // Gets info about a specific monitor refresh using the given refresh ID. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + GetRefresh(ctx context.Context, request GetRefreshRequest) (*MonitorRefreshInfo, error) + + // Get refresh. + // + // Gets info about a specific monitor refresh using the given refresh ID. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + GetRefreshByTableNameAndRefreshId(ctx context.Context, tableName string, refreshId string) (*MonitorRefreshInfo, error) + + // List refreshes. + // + // Gets an array containing the history of the most recent refreshes (up to 25) + // for this table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. 
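+	//
+	// Editor's note: an illustrative usage sketch, not generated code; the table
+	// name below is a placeholder:
+	//
+	//	refreshes, err := a.ListRefreshes(ctx, ListRefreshesRequest{
+	//		TableName: "main.default.my_table",
+	//	})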
+ ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) + + // List refreshes. + // + // Gets an array containing the history of the most recent refreshes (up to 25) + // for this table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - + // **SELECT** privilege on the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + ListRefreshesByTableName(ctx context.Context, tableName string) (*MonitorRefreshListResponse, error) + + // Regenerate a monitoring dashboard. + // + // Regenerates the monitoring dashboard for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table + // + // The call must be made from the workspace where the monitor was created. The + // dashboard will be regenerated in the assets directory that was specified when + // the monitor was created. + RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) + + // Queue a metric refresh for a monitor. + // + // Queues a metric refresh on the monitor for the specified table. The refresh + // will execute in the background. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table + // + // Additionally, the call must be made from the workspace where the monitor was + // created. + RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) + + // Update a table monitor. + // + // Updates a monitor for the specified table. + // + // The caller must either: 1. be an owner of the table's parent catalog 2. have + // **USE_CATALOG** on the table's parent catalog and be an owner of the table's + // parent schema 3. have the following permissions: - **USE_CATALOG** on the + // table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + // owner of the table. + // + // Additionally, the call must be made from the workspace where the monitor was + // created, and the caller must be the original creator of the monitor. + // + // Certain configuration fields, such as output asset identifiers, cannot be + // updated. + Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error) +} + +func NewQualityMonitorsPreview(client *client.DatabricksClient) *QualityMonitorsPreviewAPI { + return &QualityMonitorsPreviewAPI{ + qualityMonitorsPreviewImpl: qualityMonitorsPreviewImpl{ + client: client, + }, + } +} + +// A monitor computes and monitors data or model quality metrics for a table +// over time. It generates metrics tables and a dashboard that you can use to +// monitor table health and set alerts. 
+// +// Most write operations require the user to be the owner of the table (or its +// parent schema or parent catalog). Viewing the dashboard, computed metrics, or +// monitor configuration only requires the user to have **SELECT** privileges on +// the table (along with **USE_SCHEMA** and **USE_CATALOG**). +type QualityMonitorsPreviewAPI struct { + qualityMonitorsPreviewImpl +} + +// Delete a table monitor. +// +// Deletes a monitor for the specified table. +// +// The caller must either: 1. be an owner of the table's parent catalog 2. have +// **USE_CATALOG** on the table's parent catalog and be an owner of the table's +// parent schema 3. have the following permissions: - **USE_CATALOG** on the +// table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an +// owner of the table. +// +// Additionally, the call must be made from the workspace where the monitor was +// created. +// +// Note that the metric tables and dashboard will not be deleted as part of this +// call; those assets must be manually cleaned up (if desired). +func (a *QualityMonitorsPreviewAPI) DeleteByTableName(ctx context.Context, tableName string) error { + return a.qualityMonitorsPreviewImpl.Delete(ctx, DeleteQualityMonitorRequest{ + TableName: tableName, + }) +} + +// Get a table monitor. +// +// Gets a monitor for the specified table. +// +// The caller must either: 1. be an owner of the table's parent catalog 2. have +// **USE_CATALOG** on the table's parent catalog and be an owner of the table's +// parent schema. 3. have the following permissions: - **USE_CATALOG** on the +// table's parent catalog - **USE_SCHEMA** on the table's parent schema - +// **SELECT** privilege on the table. +// +// The returned information includes configuration values, as well as +// information on assets created by the monitor. Some information (e.g., +// dashboard) may be filtered out if the caller is in a different workspace than +// where the monitor was created. +func (a *QualityMonitorsPreviewAPI) GetByTableName(ctx context.Context, tableName string) (*MonitorInfo, error) { + return a.qualityMonitorsPreviewImpl.Get(ctx, GetQualityMonitorRequest{ + TableName: tableName, + }) +} + +// Get refresh. +// +// Gets info about a specific monitor refresh using the given refresh ID. +// +// The caller must either: 1. be an owner of the table's parent catalog 2. have +// **USE_CATALOG** on the table's parent catalog and be an owner of the table's +// parent schema 3. have the following permissions: - **USE_CATALOG** on the +// table's parent catalog - **USE_SCHEMA** on the table's parent schema - +// **SELECT** privilege on the table. +// +// Additionally, the call must be made from the workspace where the monitor was +// created. +func (a *QualityMonitorsPreviewAPI) GetRefreshByTableNameAndRefreshId(ctx context.Context, tableName string, refreshId string) (*MonitorRefreshInfo, error) { + return a.qualityMonitorsPreviewImpl.GetRefresh(ctx, GetRefreshRequest{ + TableName: tableName, + RefreshId: refreshId, + }) +} + +// List refreshes. +// +// Gets an array containing the history of the most recent refreshes (up to 25) +// for this table. +// +// The caller must either: 1. be an owner of the table's parent catalog 2. have +// **USE_CATALOG** on the table's parent catalog and be an owner of the table's +// parent schema 3. have the following permissions: - **USE_CATALOG** on the +// table's parent catalog - **USE_SCHEMA** on the table's parent schema - +// **SELECT** privilege on the table. 
+// +// Additionally, the call must be made from the workspace where the monitor was +// created. +func (a *QualityMonitorsPreviewAPI) ListRefreshesByTableName(ctx context.Context, tableName string) (*MonitorRefreshListResponse, error) { + return a.qualityMonitorsPreviewImpl.ListRefreshes(ctx, ListRefreshesRequest{ + TableName: tableName, + }) +} + +type RegisteredModelsPreviewInterface interface { + + // Create a Registered Model. + // + // Creates a new registered model in Unity Catalog. + // + // File storage for model versions in the registered model will be located in + // the default location which is specified by the parent schema, or the parent + // catalog, or the Metastore. + // + // For registered model creation to succeed, the user must satisfy the following + // conditions: - The caller must be a metastore admin, or be the owner of the + // parent catalog and schema, or have the **USE_CATALOG** privilege on the + // parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The + // caller must have the **CREATE MODEL** or **CREATE FUNCTION** privilege on the + // parent schema. + Create(ctx context.Context, request CreateRegisteredModelRequest) (*RegisteredModelInfo, error) + + // Delete a Registered Model. + // + // Deletes a registered model and all its model versions from the specified + // parent catalog and schema. + // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + Delete(ctx context.Context, request DeleteRegisteredModelRequest) error + + // Delete a Registered Model. + // + // Deletes a registered model and all its model versions from the specified + // parent catalog and schema. + // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + DeleteByFullName(ctx context.Context, fullName string) error + + // Delete a Registered Model Alias. + // + // Deletes a registered model alias. + // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + DeleteAlias(ctx context.Context, request DeleteAliasRequest) error + + // Delete a Registered Model Alias. + // + // Deletes a registered model alias. + // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + DeleteAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) error + + // Get a Registered Model. + // + // Get a registered model. + // + // The caller must be a metastore admin or an owner of (or have the **EXECUTE** + // privilege on) the registered model. For the latter case, the caller must also + // be the owner or have the **USE_CATALOG** privilege on the parent catalog and + // the **USE_SCHEMA** privilege on the parent schema. + Get(ctx context.Context, request GetRegisteredModelRequest) (*RegisteredModelInfo, error) + + // Get a Registered Model. 
+	//
+	// Get a registered model.
+	//
+	// The caller must be a metastore admin or an owner of (or have the **EXECUTE**
+	// privilege on) the registered model. For the latter case, the caller must also
+	// be the owner or have the **USE_CATALOG** privilege on the parent catalog and
+	// the **USE_SCHEMA** privilege on the parent schema.
+	GetByFullName(ctx context.Context, fullName string) (*RegisteredModelInfo, error)
+
+	// List Registered Models.
+	//
+	// List registered models. You can list registered models under a particular
+	// schema, or list all registered models in the current metastore.
+	//
+	// The returned models are filtered based on the privileges of the calling user.
+	// For example, the metastore admin is able to list all the registered models. A
+	// regular user needs to be the owner or have the **EXECUTE** privilege on the
+	// registered model to receive the registered models in the response. For the
+	// latter case, the caller must also be the owner or have the **USE_CATALOG**
+	// privilege on the parent catalog and the **USE_SCHEMA** privilege on the
+	// parent schema.
+	//
+	// There is no guarantee of a specific ordering of the elements in the response.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo]
+
+	// List Registered Models.
+	//
+	// List registered models. You can list registered models under a particular
+	// schema, or list all registered models in the current metastore.
+	//
+	// The returned models are filtered based on the privileges of the calling user.
+	// For example, the metastore admin is able to list all the registered models. A
+	// regular user needs to be the owner or have the **EXECUTE** privilege on the
+	// registered model to receive the registered models in the response. For the
+	// latter case, the caller must also be the owner or have the **USE_CATALOG**
+	// privilege on the parent catalog and the **USE_SCHEMA** privilege on the
+	// parent schema.
+	//
+	// There is no guarantee of a specific ordering of the elements in the response.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error)
+
+	// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsPreviewAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value.
+	//
+	// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
+	//
+	// Note: All [RegisteredModelInfo] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	RegisteredModelInfoNameToFullNameMap(ctx context.Context, request ListRegisteredModelsRequest) (map[string]string, error)
+
+	// GetByName calls [RegisteredModelsPreviewAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo].
+	//
+	// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
+	//
+	// Note: All [RegisteredModelInfo] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByName(ctx context.Context, name string) (*RegisteredModelInfo, error)
+
+	// Set a Registered Model Alias.
+	//
+	// Set an alias on the specified registered model.
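+	//
+	// Editor's note: an illustrative usage sketch, not generated code. It assumes
+	// SetRegisteredModelAliasRequest carries FullName, Alias, and VersionNum as in
+	// the stable catalog package; the values below are placeholders:
+	//
+	//	alias, err := a.SetAlias(ctx, SetRegisteredModelAliasRequest{
+	//		FullName:   "main.default.my_model",
+	//		Alias:      "champion",
+	//		VersionNum: 1,
+	//	})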
+ // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + SetAlias(ctx context.Context, request SetRegisteredModelAliasRequest) (*RegisteredModelAlias, error) + + // Update a Registered Model. + // + // Updates the specified registered model. + // + // The caller must be a metastore admin or an owner of the registered model. For + // the latter case, the caller must also be the owner or have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + // + // Currently only the name, the owner, or the comment of the registered model can + // be updated. + Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error) +} + +func NewRegisteredModelsPreview(client *client.DatabricksClient) *RegisteredModelsPreviewAPI { + return &RegisteredModelsPreviewAPI{ + registeredModelsPreviewImpl: registeredModelsPreviewImpl{ + client: client, + }, + } +} + +// Databricks provides a hosted version of MLflow Model Registry in Unity +// Catalog. Models in Unity Catalog provide centralized access control, +// auditing, lineage, and discovery of ML models across Databricks workspaces. +// +// An MLflow registered model resides in the third layer of Unity Catalog’s +// three-level namespace. Registered models contain model versions, which +// correspond to actual ML models (MLflow models). Creating new model versions +// currently requires use of the MLflow Python client. Once model versions are +// created, you can load them for batch inference using MLflow Python client +// APIs, or deploy them for real-time serving using Databricks Model Serving. +// +// All operations on registered models and model versions require USE_CATALOG +// permissions on the enclosing catalog and USE_SCHEMA permissions on the +// enclosing schema. In addition, the following privileges are +// required for various operations: +// +// * To create a registered model, users must additionally have the CREATE_MODEL +// permission on the target schema. * To view registered model or model version +// metadata, model version data files, or invoke a model version, users must +// additionally have the EXECUTE permission on the registered model. * To update +// registered model or model version tags, users must additionally have APPLY +// TAG permissions on the registered model. * To update other registered model or +// model version metadata (comments, aliases), create a new model version, or +// update permissions on the registered model, users must be owners of the +// registered model. +// +// Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. +// tagging, grants) that specify a securable type, use "FUNCTION" as the +// securable type. +type RegisteredModelsPreviewAPI struct { + registeredModelsPreviewImpl +} + +// Delete a Registered Model. +// +// Deletes a registered model and all its model versions from the specified +// parent catalog and schema. +// +// The caller must be a metastore admin or an owner of the registered model. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema.
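+//
+// A minimal usage sketch (an editorial addition, not generated documentation):
+// it assumes a configured *client.DatabricksClient named `c`, a context.Context
+// named `ctx`, and a hypothetical model `main.default.my_model`.
+//
+//	models := NewRegisteredModelsPreview(c)
+//	if err := models.DeleteByFullName(ctx, "main.default.my_model"); err != nil {
+//		// a missing model or insufficient privileges is reported here
+//	}
+//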
+func (a *RegisteredModelsPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error { + return a.registeredModelsPreviewImpl.Delete(ctx, DeleteRegisteredModelRequest{ + FullName: fullName, + }) +} + +// Delete a Registered Model Alias. +// +// Deletes a registered model alias. +// +// The caller must be a metastore admin or an owner of the registered model. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. +func (a *RegisteredModelsPreviewAPI) DeleteAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) error { + return a.registeredModelsPreviewImpl.DeleteAlias(ctx, DeleteAliasRequest{ + FullName: fullName, + Alias: alias, + }) +} + +// Get a Registered Model. +// +// Get a registered model. +// +// The caller must be a metastore admin or an owner of (or have the **EXECUTE** +// privilege on) the registered model. For the latter case, the caller must also +// be the owner or have the **USE_CATALOG** privilege on the parent catalog and +// the **USE_SCHEMA** privilege on the parent schema. +func (a *RegisteredModelsPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*RegisteredModelInfo, error) { + return a.registeredModelsPreviewImpl.Get(ctx, GetRegisteredModelRequest{ + FullName: fullName, + }) +} + +// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsPreviewAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value. +// +// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name. +// +// Note: All [RegisteredModelInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *RegisteredModelsPreviewAPI) RegisteredModelInfoNameToFullNameMap(ctx context.Context, request ListRegisteredModelsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.FullName + } + return mapping, nil +} + +// GetByName calls [RegisteredModelsPreviewAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo]. +// +// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name. +// +// Note: All [RegisteredModelInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
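+//
+// A hedged usage sketch (editorial, not generated; `c` and `ctx` are assumed to
+// be a configured *client.DatabricksClient and a context.Context, and the model
+// name is hypothetical):
+//
+//	models := NewRegisteredModelsPreview(c)
+//	model, err := models.GetByName(ctx, "my_model")
+//	if err != nil {
+//		// zero matches and duplicate names both surface as errors
+//	}
+//	_ = model // *RegisteredModelInfo on success
+//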
+func (a *RegisteredModelsPreviewAPI) GetByName(ctx context.Context, name string) (*RegisteredModelInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListRegisteredModelsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]RegisteredModelInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("RegisteredModelInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of RegisteredModelInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ResourceQuotasPreviewInterface interface { + + // Get information for a single resource quota. + // + // The GetQuota API returns usage information for a single resource quota, + // defined as a child-parent pair. This API also refreshes the quota count if it + // is out of date. Refreshes are triggered asynchronously. The updated count + // might not be returned in the first call. + GetQuota(ctx context.Context, request GetQuotaRequest) (*GetQuotaResponse, error) + + // Get information for a single resource quota. + // + // The GetQuota API returns usage information for a single resource quota, + // defined as a child-parent pair. This API also refreshes the quota count if it + // is out of date. Refreshes are triggered asynchronously. The updated count + // might not be returned in the first call. + GetQuotaByParentSecurableTypeAndParentFullNameAndQuotaName(ctx context.Context, parentSecurableType string, parentFullName string, quotaName string) (*GetQuotaResponse, error) + + // List all resource quotas under a metastore. + // + // ListQuotas returns all quota values under the metastore. There are no SLAs on + // the freshness of the counts returned. This API does not trigger a refresh of + // quota counts. + // + // This method is generated by Databricks SDK Code Generator. + ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] + + // List all resource quotas under a metastore. + // + // ListQuotas returns all quota values under the metastore. There are no SLAs on + // the freshness of the counts returned. This API does not trigger a refresh of + // quota counts. + // + // This method is generated by Databricks SDK Code Generator. + ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) +} + +func NewResourceQuotasPreview(client *client.DatabricksClient) *ResourceQuotasPreviewAPI { + return &ResourceQuotasPreviewAPI{ + resourceQuotasPreviewImpl: resourceQuotasPreviewImpl{ + client: client, + }, + } +} + +// Unity Catalog enforces resource quotas on all securable objects, which limits +// the number of resources that can be created. Quotas are expressed in terms of +// a resource type and a parent (for example, tables per metastore or schemas +// per catalog). The resource quota APIs enable you to monitor your current +// usage and limits. For more information on resource quotas see the [Unity +// Catalog documentation]. +// +// [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas +type ResourceQuotasPreviewAPI struct { + resourceQuotasPreviewImpl +} + +// Get information for a single resource quota. +// +// The GetQuota API returns usage information for a single resource quota, +// defined as a child-parent pair. 
This API also refreshes the quota count if it +// is out of date. Refreshes are triggered asynchronously. The updated count +// might not be returned in the first call. +func (a *ResourceQuotasPreviewAPI) GetQuotaByParentSecurableTypeAndParentFullNameAndQuotaName(ctx context.Context, parentSecurableType string, parentFullName string, quotaName string) (*GetQuotaResponse, error) { + return a.resourceQuotasPreviewImpl.GetQuota(ctx, GetQuotaRequest{ + ParentSecurableType: parentSecurableType, + ParentFullName: parentFullName, + QuotaName: quotaName, + }) +} + +type SchemasPreviewInterface interface { + + // Create a schema. + // + // Creates a new schema for a catalog in the metastore. The caller must be a + // metastore admin, or have the **CREATE_SCHEMA** privilege in the parent + // catalog. + Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error) + + // Delete a schema. + // + // Deletes the specified schema from the parent catalog. The caller must be the + // owner of the schema or an owner of the parent catalog. + Delete(ctx context.Context, request DeleteSchemaRequest) error + + // Delete a schema. + // + // Deletes the specified schema from the parent catalog. The caller must be the + // owner of the schema or an owner of the parent catalog. + DeleteByFullName(ctx context.Context, fullName string) error + + // Get a schema. + // + // Gets the specified schema within the metastore. The caller must be a + // metastore admin, the owner of the schema, or a user that has the + // **USE_SCHEMA** privilege on the schema. + Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error) + + // Get a schema. + // + // Gets the specified schema within the metastore. The caller must be a + // metastore admin, the owner of the schema, or a user that has the + // **USE_SCHEMA** privilege on the schema. + GetByFullName(ctx context.Context, fullName string) (*SchemaInfo, error) + + // List schemas. + // + // Gets an array of schemas for a catalog in the metastore. If the caller is the + // metastore admin or the owner of the parent catalog, all schemas for the + // catalog will be retrieved. Otherwise, only schemas owned by the caller (or + // for which the caller has the **USE_SCHEMA** privilege) will be retrieved. + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] + + // List schemas. + // + // Gets an array of schemas for a catalog in the metastore. If the caller is the + // metastore admin or the owner of the parent catalog, all schemas for the + // catalog will be retrieved. Otherwise, only schemas owned by the caller (or + // for which the caller has the **USE_SCHEMA** privilege) will be retrieved. + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) + + // SchemaInfoNameToFullNameMap calls [SchemasPreviewAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value. + // + // Returns an error if there's more than one [SchemaInfo] with the same .Name. + // + // Note: All [SchemaInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator.
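+ //
+ // A minimal usage sketch (editorial, not generated; assumes a configured
+ // *client.DatabricksClient `c` and a context.Context `ctx`; passing an empty
+ // request mirrors what the generated GetByName helper below does):
+ //
+ //	schemas := NewSchemasPreview(c)
+ //	fullNameByName, err := schemas.SchemaInfoNameToFullNameMap(ctx, ListSchemasRequest{})
+ //	if err != nil {
+ //		// duplicate schema names abort the mapping with an error
+ //	}
+ //	_ = fullNameByName
+ //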
+ SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error) + + // GetByName calls [SchemasPreviewAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo]. + // + // Returns an error if there's more than one [SchemaInfo] with the same .Name. + // + // Note: All [SchemaInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*SchemaInfo, error) + + // Update a schema. + // + // Updates a schema for a catalog. The caller must be the owner of the schema or + // a metastore admin. If the caller is a metastore admin, only the __owner__ + // field can be changed in the update. If the __name__ field must be updated, + // the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege + // on the parent catalog. + Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error) +} + +func NewSchemasPreview(client *client.DatabricksClient) *SchemasPreviewAPI { + return &SchemasPreviewAPI{ + schemasPreviewImpl: schemasPreviewImpl{ + client: client, + }, + } +} + +// A schema (also called a database) is the second layer of Unity Catalog’s +// three-level namespace. A schema organizes tables, views and functions. To +// access (or list) a table or view in a schema, users must have the USE_SCHEMA +// data permission on the schema and its parent catalog, and they must have the +// SELECT permission on the table or view. +type SchemasPreviewAPI struct { + schemasPreviewImpl +} + +// Delete a schema. +// +// Deletes the specified schema from the parent catalog. The caller must be the +// owner of the schema or an owner of the parent catalog. +func (a *SchemasPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error { + return a.schemasPreviewImpl.Delete(ctx, DeleteSchemaRequest{ + FullName: fullName, + }) +} + +// Get a schema. +// +// Gets the specified schema within the metastore. The caller must be a +// metastore admin, the owner of the schema, or a user that has the +// **USE_SCHEMA** privilege on the schema. +func (a *SchemasPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*SchemaInfo, error) { + return a.schemasPreviewImpl.Get(ctx, GetSchemaRequest{ + FullName: fullName, + }) +} + +// SchemaInfoNameToFullNameMap calls [SchemasPreviewAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value. +// +// Returns an error if there's more than one [SchemaInfo] with the same .Name. +// +// Note: All [SchemaInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *SchemasPreviewAPI) SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.FullName + } + return mapping, nil +} + +// GetByName calls [SchemasPreviewAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo]. +// +// Returns an error if there's more than one [SchemaInfo] with the same .Name. +// +// Note: All [SchemaInfo] instances are loaded into memory before returning matching by name. 
+// +// This method is generated by Databricks SDK Code Generator. +func (a *SchemasPreviewAPI) GetByName(ctx context.Context, name string) (*SchemaInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListSchemasRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]SchemaInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("SchemaInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of SchemaInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type StorageCredentialsPreviewInterface interface { + + // Create a storage credential. + // + // Creates a new storage credential. + Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error) + + // Delete a credential. + // + // Deletes a storage credential from the metastore. The caller must be an owner + // of the storage credential. + Delete(ctx context.Context, request DeleteStorageCredentialRequest) error + + // Delete a credential. + // + // Deletes a storage credential from the metastore. The caller must be an owner + // of the storage credential. + DeleteByName(ctx context.Context, name string) error + + // Get a credential. + // + // Gets a storage credential from the metastore. The caller must be a metastore + // admin, the owner of the storage credential, or have some permission on the + // storage credential. + Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error) + + // Get a credential. + // + // Gets a storage credential from the metastore. The caller must be a metastore + // admin, the owner of the storage credential, or have some permission on the + // storage credential. + GetByName(ctx context.Context, name string) (*StorageCredentialInfo, error) + + // List credentials. + // + // Gets an array of storage credentials (as __StorageCredentialInfo__ objects). + // The array is limited to only those storage credentials the caller has + // permission to access. If the caller is a metastore admin, retrieval of + // credentials is unrestricted. There is no guarantee of a specific ordering of + // the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] + + // List credentials. + // + // Gets an array of storage credentials (as __StorageCredentialInfo__ objects). + // The array is limited to only those storage credentials the caller has + // permission to access. If the caller is a metastore admin, retrieval of + // credentials is unrestricted. There is no guarantee of a specific ordering of + // the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) + + // StorageCredentialInfoNameToIdMap calls [StorageCredentialsPreviewAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value. + // + // Returns an error if there's more than one [StorageCredentialInfo] with the same .Name. + // + // Note: All [StorageCredentialInfo] instances are loaded into memory before creating a map. 
+ // + // This method is generated by Databricks SDK Code Generator. + StorageCredentialInfoNameToIdMap(ctx context.Context, request ListStorageCredentialsRequest) (map[string]string, error) + + // Update a credential. + // + // Updates a storage credential on the metastore. + Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error) + + // Validate a storage credential. + // + // Validates a storage credential. At least one of __external_location_name__ + // and __url__ needs to be provided. If only one of them is provided, it will be + // used for validation. If both are provided, the __url__ will be used for + // validation, and __external_location_name__ will be ignored when checking + // overlapping URLs. + // + // Either the __storage_credential_name__ or the cloud-specific credential must + // be provided. + // + // The caller must be a metastore admin or the storage credential owner or have + // the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage + // credential. + Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error) +} + +func NewStorageCredentialsPreview(client *client.DatabricksClient) *StorageCredentialsPreviewAPI { + return &StorageCredentialsPreviewAPI{ + storageCredentialsPreviewImpl: storageCredentialsPreviewImpl{ + client: client, + }, + } +} + +// A storage credential represents an authentication and authorization mechanism +// for accessing data stored on your cloud tenant. Each storage credential is +// subject to Unity Catalog access-control policies that control which users and +// groups can access the credential. If a user does not have access to a storage +// credential in Unity Catalog, the request fails and Unity Catalog does not +// attempt to authenticate to your cloud tenant on the user’s behalf. +// +// Databricks recommends using external locations rather than using storage +// credentials directly. +// +// To create storage credentials, you must be a Databricks account admin. The +// account admin who creates the storage credential can delegate ownership to +// another user or group to manage permissions on it. +type StorageCredentialsPreviewAPI struct { + storageCredentialsPreviewImpl +} + +// Delete a credential. +// +// Deletes a storage credential from the metastore. The caller must be an owner +// of the storage credential. +func (a *StorageCredentialsPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.storageCredentialsPreviewImpl.Delete(ctx, DeleteStorageCredentialRequest{ + Name: name, + }) +} + +// Get a credential. +// +// Gets a storage credential from the metastore. The caller must be a metastore +// admin, the owner of the storage credential, or have some permission on the +// storage credential. +func (a *StorageCredentialsPreviewAPI) GetByName(ctx context.Context, name string) (*StorageCredentialInfo, error) { + return a.storageCredentialsPreviewImpl.Get(ctx, GetStorageCredentialRequest{ + Name: name, + }) +} + +// StorageCredentialInfoNameToIdMap calls [StorageCredentialsPreviewAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value. +// +// Returns an error if there's more than one [StorageCredentialInfo] with the same .Name. +// +// Note: All [StorageCredentialInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator.
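+//
+// A minimal usage sketch (editorial, not generated; assumes a configured
+// *client.DatabricksClient `c` and a context.Context `ctx`):
+//
+//	creds := NewStorageCredentialsPreview(c)
+//	idByName, err := creds.StorageCredentialInfoNameToIdMap(ctx, ListStorageCredentialsRequest{})
+//	if err != nil {
+//		// duplicate credential names are reported as an error
+//	}
+//	_ = idByName // map of credential name to credential ID
+//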
+func (a *StorageCredentialsPreviewAPI) StorageCredentialInfoNameToIdMap(ctx context.Context, request ListStorageCredentialsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +type SystemSchemasPreviewInterface interface { + + // Disable a system schema. + // + // Disables the system schema and removes it from the system catalog. The caller + // must be an account admin or a metastore admin. + Disable(ctx context.Context, request DisableRequest) error + + // Disable a system schema. + // + // Disables the system schema and removes it from the system catalog. The caller + // must be an account admin or a metastore admin. + DisableByMetastoreIdAndSchemaName(ctx context.Context, metastoreId string, schemaName string) error + + // Enable a system schema. + // + // Enables the system schema and adds it to the system catalog. The caller must + // be an account admin or a metastore admin. + Enable(ctx context.Context, request EnableRequest) error + + // List system schemas. + // + // Gets an array of system schemas for a metastore. The caller must be an + // account admin or a metastore admin. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] + + // List system schemas. + // + // Gets an array of system schemas for a metastore. The caller must be an + // account admin or a metastore admin. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) + + // List system schemas. + // + // Gets an array of system schemas for a metastore. The caller must be an + // account admin or a metastore admin. + ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error) +} + +func NewSystemSchemasPreview(client *client.DatabricksClient) *SystemSchemasPreviewAPI { + return &SystemSchemasPreviewAPI{ + systemSchemasPreviewImpl: systemSchemasPreviewImpl{ + client: client, + }, + } +} + +// A system schema is a schema that lives within the system catalog. A system +// schema may contain information about customer usage of Unity Catalog such as +// audit-logs, billing-logs, lineage information, etc. +type SystemSchemasPreviewAPI struct { + systemSchemasPreviewImpl +} + +// Disable a system schema. +// +// Disables the system schema and removes it from the system catalog. The caller +// must be an account admin or a metastore admin. +func (a *SystemSchemasPreviewAPI) DisableByMetastoreIdAndSchemaName(ctx context.Context, metastoreId string, schemaName string) error { + return a.systemSchemasPreviewImpl.Disable(ctx, DisableRequest{ + MetastoreId: metastoreId, + SchemaName: schemaName, + }) +} + +// List system schemas. +// +// Gets an array of system schemas for a metastore. The caller must be an +// account admin or a metastore admin. 
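+//
+// A minimal usage sketch (editorial, not generated; the metastore ID is a
+// placeholder value, and `c`/`ctx` are assumed as above):
+//
+//	sys := NewSystemSchemasPreview(c)
+//	resp, err := sys.ListByMetastoreId(ctx, "<metastore-id>")
+//	if err != nil {
+//		// requires account admin or metastore admin privileges
+//	}
+//	_ = resp // *ListSystemSchemasResponse
+//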
+func (a *SystemSchemasPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error) { + return a.systemSchemasPreviewImpl.internalList(ctx, ListSystemSchemasRequest{ + MetastoreId: metastoreId, + }) +} + +type TableConstraintsPreviewInterface interface { + + // Create a table constraint. + // + // Creates a new table constraint. + // + // For the table constraint creation to succeed, the user must satisfy both of + // these conditions: - the user must have the **USE_CATALOG** privilege on the + // table's parent catalog, the **USE_SCHEMA** privilege on the table's parent + // schema, and be the owner of the table. - if the new constraint is a + // __ForeignKeyConstraint__, the user must have the **USE_CATALOG** privilege on + // the referenced parent table's catalog, the **USE_SCHEMA** privilege on the + // referenced parent table's schema, and be the owner of the referenced parent + // table. + Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error) + + // Delete a table constraint. + // + // Deletes a table constraint. + // + // For the table constraint deletion to succeed, the user must satisfy both of + // these conditions: - the user must have the **USE_CATALOG** privilege on the + // table's parent catalog, the **USE_SCHEMA** privilege on the table's parent + // schema, and be the owner of the table. - if __cascade__ argument is **true**, + // the user must have the following permissions on all of the child tables: the + // **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** + // privilege on the table's schema, and be the owner of the table. + Delete(ctx context.Context, request DeleteTableConstraintRequest) error + + // Delete a table constraint. + // + // Deletes a table constraint. + // + // For the table constraint deletion to succeed, the user must satisfy both of + // these conditions: - the user must have the **USE_CATALOG** privilege on the + // table's parent catalog, the **USE_SCHEMA** privilege on the table's parent + // schema, and be the owner of the table. - if __cascade__ argument is **true**, + // the user must have the following permissions on all of the child tables: the + // **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** + // privilege on the table's schema, and be the owner of the table. + DeleteByFullName(ctx context.Context, fullName string) error +} + +func NewTableConstraintsPreview(client *client.DatabricksClient) *TableConstraintsPreviewAPI { + return &TableConstraintsPreviewAPI{ + tableConstraintsPreviewImpl: tableConstraintsPreviewImpl{ + client: client, + }, + } +} + +// Primary key and foreign key constraints encode relationships between fields +// in tables. +// +// Primary and foreign keys are informational only and are not enforced. Foreign +// keys must reference a primary key in another table. This primary key is the +// parent constraint of the foreign key and the table this primary key is on is +// the parent table of the foreign key. Similarly, the foreign key is the child +// constraint of its referenced primary key; the table of the foreign key is the +// child table of the primary key. +// +// You can declare primary keys and foreign keys as part of the table +// specification during table creation. You can also add or drop constraints on +// existing tables. +type TableConstraintsPreviewAPI struct { + tableConstraintsPreviewImpl +} + +// Delete a table constraint. +// +// Deletes a table constraint. 
+// +// For the table constraint deletion to succeed, the user must satisfy both of +// these conditions: - the user must have the **USE_CATALOG** privilege on the +// table's parent catalog, the **USE_SCHEMA** privilege on the table's parent +// schema, and be the owner of the table. - if __cascade__ argument is **true**, +// the user must have the following permissions on all of the child tables: the +// **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA** +// privilege on the table's schema, and be the owner of the table. +func (a *TableConstraintsPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error { + return a.tableConstraintsPreviewImpl.Delete(ctx, DeleteTableConstraintRequest{ + FullName: fullName, + }) +} + +type TablesPreviewInterface interface { + + // Delete a table. + // + // Deletes a table from the specified parent catalog and schema. The caller must + // be the owner of the parent catalog, have the **USE_CATALOG** privilege on the + // parent catalog and be the owner of the parent schema, or be the owner of the + // table and have the **USE_CATALOG** privilege on the parent catalog and the + // **USE_SCHEMA** privilege on the parent schema. + Delete(ctx context.Context, request DeleteTableRequest) error + + // Delete a table. + // + // Deletes a table from the specified parent catalog and schema. The caller must + // be the owner of the parent catalog, have the **USE_CATALOG** privilege on the + // parent catalog and be the owner of the parent schema, or be the owner of the + // table and have the **USE_CATALOG** privilege on the parent catalog and the + // **USE_SCHEMA** privilege on the parent schema. + DeleteByFullName(ctx context.Context, fullName string) error + + // Get boolean reflecting if table exists. + // + // Gets if a table exists in the metastore for a specific catalog and schema. + // The caller must satisfy one of the following requirements: * Be a metastore + // admin * Be the owner of the parent catalog * Be the owner of the parent + // schema and have the USE_CATALOG privilege on the parent catalog * Have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema, and either be the table owner or have the + // SELECT privilege on the table. * Have BROWSE privilege on the parent catalog + // * Have BROWSE privilege on the parent schema. + Exists(ctx context.Context, request ExistsRequest) (*TableExistsResponse, error) + + // Get boolean reflecting if table exists. + // + // Gets if a table exists in the metastore for a specific catalog and schema. + // The caller must satisfy one of the following requirements: * Be a metastore + // admin * Be the owner of the parent catalog * Be the owner of the parent + // schema and have the USE_CATALOG privilege on the parent catalog * Have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema, and either be the table owner or have the + // SELECT privilege on the table. * Have BROWSE privilege on the parent catalog + // * Have BROWSE privilege on the parent schema. + ExistsByFullName(ctx context.Context, fullName string) (*TableExistsResponse, error) + + // Get a table. + // + // Gets a table from the metastore for a specific catalog and schema. 
The caller + // must satisfy one of the following requirements: * Be a metastore admin * Be + // the owner of the parent catalog * Be the owner of the parent schema and have + // the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** + // privilege on the parent catalog and the **USE_SCHEMA** privilege on the + // parent schema, and either be the table owner or have the SELECT privilege on + // the table. + Get(ctx context.Context, request GetTableRequest) (*TableInfo, error) + + // Get a table. + // + // Gets a table from the metastore for a specific catalog and schema. The caller + // must satisfy one of the following requirements: * Be a metastore admin * Be + // the owner of the parent catalog * Be the owner of the parent schema and have + // the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** + // privilege on the parent catalog and the **USE_SCHEMA** privilege on the + // parent schema, and either be the table owner or have the SELECT privilege on + // the table. + GetByFullName(ctx context.Context, fullName string) (*TableInfo, error) + + // List tables. + // + // Gets an array of all tables for the current metastore under the parent + // catalog and schema. The caller must be a metastore admin or an owner of (or + // have the **SELECT** privilege on) the table. For the latter case, the caller + // must also be the owner or have the **USE_CATALOG** privilege on the parent + // catalog and the **USE_SCHEMA** privilege on the parent schema. There is no + // guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] + + // List tables. + // + // Gets an array of all tables for the current metastore under the parent + // catalog and schema. The caller must be a metastore admin or an owner of (or + // have the **SELECT** privilege on) the table. For the latter case, the caller + // must also be the owner or have the **USE_CATALOG** privilege on the parent + // catalog and the **USE_SCHEMA** privilege on the parent schema. There is no + // guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) + + // TableInfoNameToTableIdMap calls [TablesPreviewAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value. + // + // Returns an error if there's more than one [TableInfo] with the same .Name. + // + // Note: All [TableInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error) + + // GetByName calls [TablesPreviewAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo]. + // + // Returns an error if there's more than one [TableInfo] with the same .Name. + // + // Note: All [TableInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*TableInfo, error) + + // List table summaries. + // + // Gets an array of summaries for tables for a schema and catalog within the + // metastore. 
The table summaries returned are either: + // + // * summaries for tables (within the current metastore and parent catalog and + // schema), when the user is a metastore admin, or: * summaries for tables and + // schemas (within the current metastore and parent catalog) for which the user + // has ownership or the **SELECT** privilege on the table and ownership or + // **USE_SCHEMA** privilege on the schema, provided that the user also has + // ownership or the **USE_CATALOG** privilege on the parent catalog. + // + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] + + // List table summaries. + // + // Gets an array of summaries for tables for a schema and catalog within the + // metastore. The table summaries returned are either: + // + // * summaries for tables (within the current metastore and parent catalog and + // schema), when the user is a metastore admin, or: * summaries for tables and + // schemas (within the current metastore and parent catalog) for which the user + // has ownership or the **SELECT** privilege on the table and ownership or + // **USE_SCHEMA** privilege on the schema, provided that the user also has + // ownership or the **USE_CATALOG** privilege on the parent catalog. + // + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) + + // Update a table owner. + // + // Change the owner of the table. The caller must be the owner of the parent + // catalog, have the **USE_CATALOG** privilege on the parent catalog and be the + // owner of the parent schema, or be the owner of the table and have the + // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + // privilege on the parent schema. + Update(ctx context.Context, request UpdateTableRequest) error +} + +func NewTablesPreview(client *client.DatabricksClient) *TablesPreviewAPI { + return &TablesPreviewAPI{ + tablesPreviewImpl: tablesPreviewImpl{ + client: client, + }, + } +} + +// A table resides in the third layer of Unity Catalog’s three-level +// namespace. It contains rows of data. To create a table, users must have +// CREATE_TABLE and USE_SCHEMA permissions on the schema, and they must have the +// USE_CATALOG permission on its parent catalog. To query a table, users must +// have the SELECT permission on the table, and they must have the USE_CATALOG +// permission on its parent catalog and the USE_SCHEMA permission on its parent +// schema. +// +// A table can be managed or external. From an API perspective, a __VIEW__ is a +// particular kind of table (rather than a managed or external table). +type TablesPreviewAPI struct { + tablesPreviewImpl +} + +// Delete a table. +// +// Deletes a table from the specified parent catalog and schema. The caller must +// be the owner of the parent catalog, have the **USE_CATALOG** privilege on the +// parent catalog and be the owner of the parent schema, or be the owner of the +// table and have the **USE_CATALOG** privilege on the parent catalog and the +// **USE_SCHEMA** privilege on the parent schema. 
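+//
+// A minimal usage sketch (editorial, not generated; the three-level table name
+// is hypothetical):
+//
+//	tables := NewTablesPreview(c)
+//	if err := tables.DeleteByFullName(ctx, "main.default.my_table"); err != nil {
+//		// a missing table or insufficient privileges is reported here
+//	}
+//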
+func (a *TablesPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error { + return a.tablesPreviewImpl.Delete(ctx, DeleteTableRequest{ + FullName: fullName, + }) +} + +// Get boolean reflecting if table exists. +// +// Gets if a table exists in the metastore for a specific catalog and schema. +// The caller must satisfy one of the following requirements: * Be a metastore +// admin * Be the owner of the parent catalog * Be the owner of the parent +// schema and have the USE_CATALOG privilege on the parent catalog * Have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema, and either be the table owner or have the +// SELECT privilege on the table. * Have BROWSE privilege on the parent catalog +// * Have BROWSE privilege on the parent schema. +func (a *TablesPreviewAPI) ExistsByFullName(ctx context.Context, fullName string) (*TableExistsResponse, error) { + return a.tablesPreviewImpl.Exists(ctx, ExistsRequest{ + FullName: fullName, + }) +} + +// Get a table. +// +// Gets a table from the metastore for a specific catalog and schema. The caller +// must satisfy one of the following requirements: * Be a metastore admin * Be +// the owner of the parent catalog * Be the owner of the parent schema and have +// the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema, and either be the table owner or have the SELECT privilege on +// the table. +func (a *TablesPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*TableInfo, error) { + return a.tablesPreviewImpl.Get(ctx, GetTableRequest{ + FullName: fullName, + }) +} + +// TableInfoNameToTableIdMap calls [TablesPreviewAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value. +// +// Returns an error if there's more than one [TableInfo] with the same .Name. +// +// Note: All [TableInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *TablesPreviewAPI) TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.TableId + } + return mapping, nil +} + +// GetByName calls [TablesPreviewAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo]. +// +// Returns an error if there's more than one [TableInfo] with the same .Name. +// +// Note: All [TableInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
+func (a *TablesPreviewAPI) GetByName(ctx context.Context, name string) (*TableInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListTablesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]TableInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("TableInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of TableInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type TemporaryTableCredentialsPreviewInterface interface { + + // Generate a temporary table credential. + // + // Get a short-lived credential for directly accessing the table data on cloud + // storage. The metastore must have the external_access_enabled flag set to true + // (default false). The caller must have the EXTERNAL_USE_SCHEMA privilege on the + // parent schema and this privilege can only be granted by catalog owners. + GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) +} + +func NewTemporaryTableCredentialsPreview(client *client.DatabricksClient) *TemporaryTableCredentialsPreviewAPI { + return &TemporaryTableCredentialsPreviewAPI{ + temporaryTableCredentialsPreviewImpl: temporaryTableCredentialsPreviewImpl{ + client: client, + }, + } +} + +// Temporary Table Credentials refer to short-lived, downscoped credentials used +// to access cloud storage locations where table data is stored in Databricks. +// These credentials are employed to provide secure and time-limited access to +// data in cloud environments such as AWS, Azure, and Google Cloud. Each cloud +// provider has its own type of credentials: AWS uses temporary session tokens +// via AWS Security Token Service (STS), Azure utilizes Shared Access Signatures +// (SAS) for its data storage services, and Google Cloud supports temporary +// credentials through OAuth 2.0. Temporary table credentials ensure that data +// access is limited in scope and duration, reducing the risk of unauthorized +// access or misuse. To use the temporary table credentials API, a metastore +// admin needs to enable the external_access_enabled flag (off by default) at +// the metastore level, and the user needs to be granted the EXTERNAL USE SCHEMA +// permission at the schema level by the catalog admin. Note that EXTERNAL USE +// SCHEMA is a schema-level permission that can only be granted by the catalog +// admin explicitly and, for security reasons, is not included in schema +// ownership or ALL PRIVILEGES on the schema. +type TemporaryTableCredentialsPreviewAPI struct { + temporaryTableCredentialsPreviewImpl +} + +type VolumesPreviewInterface interface { + + // Create a Volume. + // + // Creates a new volume. + // + // The user can create either an external volume or a managed volume. An + // external volume will be created in the specified external location, while a + // managed volume will be located in the default location which is specified by + // the parent schema, or the parent catalog, or the Metastore.
+ // + // For the volume creation to succeed, the user must satisfy the following + // conditions: - The caller must be a metastore admin, or be the owner of the + // parent catalog and schema, or have the **USE_CATALOG** privilege on the + // parent catalog and the **USE_SCHEMA** privilege on the parent schema. - The + // caller must have **CREATE VOLUME** privilege on the parent schema. + // + // For an external volume, the following conditions also need to be satisfied: - The + // caller must have **CREATE EXTERNAL VOLUME** privilege on the external + // location. - There are no other tables or volumes in the specified + // storage location. - The specified storage location is not under the location + // of other tables, volumes, catalogs, or schemas. + Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error) + + // Delete a Volume. + // + // Deletes a volume from the specified parent catalog and schema. + // + // The caller must be a metastore admin or an owner of the volume. For the + // latter case, the caller must also be the owner or have the **USE_CATALOG** + // privilege on the parent catalog and the **USE_SCHEMA** privilege on the + // parent schema. + Delete(ctx context.Context, request DeleteVolumeRequest) error + + // Delete a Volume. + // + // Deletes a volume from the specified parent catalog and schema. + // + // The caller must be a metastore admin or an owner of the volume. For the + // latter case, the caller must also be the owner or have the **USE_CATALOG** + // privilege on the parent catalog and the **USE_SCHEMA** privilege on the + // parent schema. + DeleteByName(ctx context.Context, name string) error + + // List Volumes. + // + // Gets an array of volumes for the current metastore under the parent catalog + // and schema. + // + // The returned volumes are filtered based on the privileges of the calling + // user. For example, the metastore admin is able to list all the volumes. A + // regular user needs to be the owner or have the **READ VOLUME** privilege on + // the volume to receive the volumes in the response. For the latter case, the + // caller must also be the owner or have the **USE_CATALOG** privilege on the + // parent catalog and the **USE_SCHEMA** privilege on the parent schema. + // + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] + + // List Volumes. + // + // Gets an array of volumes for the current metastore under the parent catalog + // and schema. + // + // The returned volumes are filtered based on the privileges of the calling + // user. For example, the metastore admin is able to list all the volumes. A + // regular user needs to be the owner or have the **READ VOLUME** privilege on + // the volume to receive the volumes in the response. For the latter case, the + // caller must also be the owner or have the **USE_CATALOG** privilege on the + // parent catalog and the **USE_SCHEMA** privilege on the parent schema. + // + // There is no guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) + + // VolumeInfoNameToVolumeIdMap calls [VolumesPreviewAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value.
+ // + // Returns an error if there's more than one [VolumeInfo] with the same .Name. + // + // Note: All [VolumeInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error) + + // GetByName calls [VolumesPreviewAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo]. + // + // Returns an error if there's more than one [VolumeInfo] with the same .Name. + // + // Note: All [VolumeInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*VolumeInfo, error) + + // Get a Volume. + // + // Gets a volume from the metastore for a specific catalog and schema. + // + // The caller must be a metastore admin or an owner of (or have the **READ + // VOLUME** privilege on) the volume. For the latter case, the caller must also + // be the owner or have the **USE_CATALOG** privilege on the parent catalog and + // the **USE_SCHEMA** privilege on the parent schema. + Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error) + + // Get a Volume. + // + // Gets a volume from the metastore for a specific catalog and schema. + // + // The caller must be a metastore admin or an owner of (or have the **READ + // VOLUME** privilege on) the volume. For the latter case, the caller must also + // be the owner or have the **USE_CATALOG** privilege on the parent catalog and + // the **USE_SCHEMA** privilege on the parent schema. + ReadByName(ctx context.Context, name string) (*VolumeInfo, error) + + // Update a Volume. + // + // Updates the specified volume under the specified parent catalog and schema. + // + // The caller must be a metastore admin or an owner of the volume. For the + // latter case, the caller must also be the owner or have the **USE_CATALOG** + // privilege on the parent catalog and the **USE_SCHEMA** privilege on the + // parent schema. + // + // Currently only the name, the owner, or the comment of the volume can + // be updated. + Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error) +} + +func NewVolumesPreview(client *client.DatabricksClient) *VolumesPreviewAPI { + return &VolumesPreviewAPI{ + volumesPreviewImpl: volumesPreviewImpl{ + client: client, + }, + } +} + +// Volumes are a Unity Catalog (UC) capability for accessing, storing, +// governing, organizing and processing files. Use cases include running machine +// learning on unstructured data such as image, audio, video, or PDF files, +// organizing data sets during the data exploration stages in data science, +// working with libraries that require access to the local file system on +// cluster machines, storing library and config files of arbitrary formats such +// as .whl or .txt centrally and providing secure access to them across +// workspaces, or transforming and querying non-tabular data files in ETL. +type VolumesPreviewAPI struct { + volumesPreviewImpl +} + +// Delete a Volume. +// +// Deletes a volume from the specified parent catalog and schema. +// +// The caller must be a metastore admin or an owner of the volume. For the +// latter case, the caller must also be the owner or have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema.
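+//
+// A minimal usage sketch (editorial, not generated; the three-level volume name
+// is hypothetical):
+//
+//	volumes := NewVolumesPreview(c)
+//	if err := volumes.DeleteByName(ctx, "main.default.my_volume"); err != nil {
+//		// ownership or metastore admin rights are required
+//	}
+//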
+func (a *VolumesPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.volumesPreviewImpl.Delete(ctx, DeleteVolumeRequest{ + Name: name, + }) +} + +// VolumeInfoNameToVolumeIdMap calls [VolumesPreviewAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value. +// +// Returns an error if there's more than one [VolumeInfo] with the same .Name. +// +// Note: All [VolumeInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *VolumesPreviewAPI) VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.VolumeId + } + return mapping, nil +} + +// GetByName calls [VolumesPreviewAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo]. +// +// Returns an error if there's more than one [VolumeInfo] with the same .Name. +// +// Note: All [VolumeInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *VolumesPreviewAPI) GetByName(ctx context.Context, name string) (*VolumeInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListVolumesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]VolumeInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("VolumeInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of VolumeInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Get a Volume. +// +// Gets a volume from the metastore for a specific catalog and schema. +// +// The caller must be a metastore admin or an owner of (or have the **READ +// VOLUME** privilege on) the volume. For the latter case, the caller must also +// be the owner or have the **USE_CATALOG** privilege on the parent catalog and +// the **USE_SCHEMA** privilege on the parent schema. +func (a *VolumesPreviewAPI) ReadByName(ctx context.Context, name string) (*VolumeInfo, error) { + return a.volumesPreviewImpl.Read(ctx, ReadVolumeRequest{ + Name: name, + }) +} + +type WorkspaceBindingsPreviewInterface interface { + + // Get catalog workspace bindings. + // + // Gets workspace bindings of the catalog. The caller must be a metastore admin + // or an owner of the catalog. + Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error) + + // Get catalog workspace bindings. + // + // Gets workspace bindings of the catalog. The caller must be a metastore admin + // or an owner of the catalog. + GetByName(ctx context.Context, name string) (*CurrentWorkspaceBindings, error) + + // Get securable workspace bindings. + // + // Gets workspace bindings of the securable. The caller must be a metastore + // admin or an owner of the securable. + // + // This method is generated by Databricks SDK Code Generator. 
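+ //
+ // A minimal iteration sketch (editorial, not generated; `st` stands in for a
+ // GetBindingsSecurableType value and "main" for a securable name, and the
+ // HasNext/Next calls assume the listing.Iterator contract used by this SDK):
+ //
+ //	bindings := NewWorkspaceBindingsPreview(c)
+ //	var st GetBindingsSecurableType // e.g. the catalog securable type
+ //	it := bindings.GetBindings(ctx, GetBindingsRequest{SecurableType: st, SecurableName: "main"})
+ //	for it.HasNext(ctx) {
+ //		b, err := it.Next(ctx)
+ //		if err != nil {
+ //			break // handle the error
+ //		}
+ //		_ = b // WorkspaceBinding
+ //	}
+ //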
+ GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] + + // Get securable workspace bindings. + // + // Gets workspace bindings of the securable. The caller must be a metastore + // admin or an owner of the securable. + // + // This method is generated by Databricks SDK Code Generator. + GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) + + // Get securable workspace bindings. + // + // Gets workspace bindings of the securable. The caller must be a metastore + // admin or an owner of the securable. + GetBindingsBySecurableTypeAndSecurableName(ctx context.Context, securableType GetBindingsSecurableType, securableName string) (*WorkspaceBindingsResponse, error) + + // Update catalog workspace bindings. + // + // Updates workspace bindings of the catalog. The caller must be a metastore + // admin or an owner of the catalog. + Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error) + + // Update securable workspace bindings. + // + // Updates workspace bindings of the securable. The caller must be a metastore + // admin or an owner of the securable. + UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error) +} + +func NewWorkspaceBindingsPreview(client *client.DatabricksClient) *WorkspaceBindingsPreviewAPI { + return &WorkspaceBindingsPreviewAPI{ + workspaceBindingsPreviewImpl: workspaceBindingsPreviewImpl{ + client: client, + }, + } +} + +// A securable in Databricks can be configured as __OPEN__ or __ISOLATED__. An +// __OPEN__ securable can be accessed from any workspace, while an __ISOLATED__ +// securable can only be accessed from a configured list of workspaces. This API +// allows you to configure (bind) securables to workspaces. +// +// NOTE: The __isolation_mode__ is configured for the securable itself (using +// its Update method) and the workspace bindings are only consulted when the +// securable's __isolation_mode__ is set to __ISOLATED__. +// +// A securable's workspace bindings can be configured by a metastore admin or +// the owner of the securable. +// +// The original path (/api/2.1/unity-catalog/workspace-bindings/catalogs/{name}) +// is deprecated. Please use the new path +// (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which +// introduces the ability to bind a securable in READ_ONLY mode (catalogs only). +// +// Securable types that support binding: - catalog - storage_credential - +// external_location +type WorkspaceBindingsPreviewAPI struct { + workspaceBindingsPreviewImpl +} + +// Get catalog workspace bindings. +// +// Gets workspace bindings of the catalog. The caller must be a metastore admin +// or an owner of the catalog. +func (a *WorkspaceBindingsPreviewAPI) GetByName(ctx context.Context, name string) (*CurrentWorkspaceBindings, error) { + return a.workspaceBindingsPreviewImpl.Get(ctx, GetWorkspaceBindingRequest{ + Name: name, + }) +}
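A hedged sketch of reading bindings through the new securable-type path described above; the enum constant GetBindingsSecurableTypeCatalog and the WorkspaceBindingsResponse.Bindings field are assumed to match this package's generated model, and "my_catalog" is a hypothetical catalog name.

    package main

    import (
        "context"
        "fmt"

        catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
    )

    func main() {
        ctx := context.Background()
        w, err := catalogpreview.NewWorkspaceBindingsPreviewClient(nil)
        if err != nil {
            panic(err)
        }
        // Read bindings for one securable via the
        // /bindings/{securable_type}/{securable_name} path.
        resp, err := w.GetBindingsBySecurableTypeAndSecurableName(ctx,
            catalogpreview.GetBindingsSecurableTypeCatalog, "my_catalog")
        if err != nil {
            panic(err)
        }
        for _, b := range resp.Bindings {
            fmt.Println(b.WorkspaceId, b.BindingType)
        }
    }

+// Get securable workspace bindings. +// +// Gets workspace bindings of the securable. The caller must be a metastore +// admin or an owner of the securable.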
+func (a *WorkspaceBindingsPreviewAPI) GetBindingsBySecurableTypeAndSecurableName(ctx context.Context, securableType GetBindingsSecurableType, securableName string) (*WorkspaceBindingsResponse, error) { + return a.workspaceBindingsPreviewImpl.internalGetBindings(ctx, GetBindingsRequest{ + SecurableType: securableType, + SecurableName: securableName, + }) +} diff --git a/catalog/v2preview/client.go b/catalog/v2preview/client.go new file mode 100755 index 000000000..e585fb838 --- /dev/null +++ b/catalog/v2preview/client.go @@ -0,0 +1,815 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package catalogpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type AccountMetastoreAssignmentsPreviewClient struct { + AccountMetastoreAssignmentsPreviewInterface + + Config *config.Config +} + +func NewAccountMetastoreAssignmentsPreviewClient(cfg *config.Config) (*AccountMetastoreAssignmentsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountMetastoreAssignmentsPreviewClient{ + Config: cfg, + AccountMetastoreAssignmentsPreviewInterface: NewAccountMetastoreAssignmentsPreview(apiClient), + }, nil +} + +type AccountMetastoresPreviewClient struct { + AccountMetastoresPreviewInterface + + Config *config.Config +} + +func NewAccountMetastoresPreviewClient(cfg *config.Config) (*AccountMetastoresPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountMetastoresPreviewClient{ + Config: cfg, + AccountMetastoresPreviewInterface: NewAccountMetastoresPreview(apiClient), + }, nil +} + +type AccountStorageCredentialsPreviewClient struct { + AccountStorageCredentialsPreviewInterface + + Config *config.Config +} + +func NewAccountStorageCredentialsPreviewClient(cfg *config.Config) (*AccountStorageCredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountStorageCredentialsPreviewClient{ + Config: cfg, + AccountStorageCredentialsPreviewInterface: NewAccountStorageCredentialsPreview(apiClient), + }, nil +} + +type ArtifactAllowlistsPreviewClient struct { + ArtifactAllowlistsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewArtifactAllowlistsPreviewClient(cfg *config.Config) (*ArtifactAllowlistsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err 
:= cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ArtifactAllowlistsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ArtifactAllowlistsPreviewInterface: NewArtifactAllowlistsPreview(databricksClient), + }, nil +} + +type CatalogsPreviewClient struct { + CatalogsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCatalogsPreviewClient(cfg *config.Config) (*CatalogsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CatalogsPreviewClient{ + Config: cfg, + apiClient: apiClient, + CatalogsPreviewInterface: NewCatalogsPreview(databricksClient), + }, nil +} + +type ConnectionsPreviewClient struct { + ConnectionsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConnectionsPreviewClient(cfg *config.Config) (*ConnectionsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConnectionsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConnectionsPreviewInterface: NewConnectionsPreview(databricksClient), + }, nil +} + +type CredentialsPreviewClient struct { + CredentialsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CredentialsPreviewClient{ + Config: cfg, + apiClient: apiClient, + CredentialsPreviewInterface: NewCredentialsPreview(databricksClient), + }, nil +} + +type ExternalLocationsPreviewClient struct { + ExternalLocationsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewExternalLocationsPreviewClient(cfg *config.Config) (*ExternalLocationsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, 
errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ExternalLocationsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ExternalLocationsPreviewInterface: NewExternalLocationsPreview(databricksClient), + }, nil +} + +type FunctionsPreviewClient struct { + FunctionsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewFunctionsPreviewClient(cfg *config.Config) (*FunctionsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &FunctionsPreviewClient{ + Config: cfg, + apiClient: apiClient, + FunctionsPreviewInterface: NewFunctionsPreview(databricksClient), + }, nil +} + +type GrantsPreviewClient struct { + GrantsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewGrantsPreviewClient(cfg *config.Config) (*GrantsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &GrantsPreviewClient{ + Config: cfg, + apiClient: apiClient, + GrantsPreviewInterface: NewGrantsPreview(databricksClient), + }, nil +} + +type MetastoresPreviewClient struct { + MetastoresPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewMetastoresPreviewClient(cfg *config.Config) (*MetastoresPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &MetastoresPreviewClient{ + Config: cfg, + apiClient: apiClient, + MetastoresPreviewInterface: NewMetastoresPreview(databricksClient), + }, nil +} + +type ModelVersionsPreviewClient struct { + ModelVersionsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewModelVersionsPreviewClient(cfg *config.Config) (*ModelVersionsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := 
cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ModelVersionsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ModelVersionsPreviewInterface: NewModelVersionsPreview(databricksClient), + }, nil +} + +type OnlineTablesPreviewClient struct { + OnlineTablesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewOnlineTablesPreviewClient(cfg *config.Config) (*OnlineTablesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &OnlineTablesPreviewClient{ + Config: cfg, + apiClient: apiClient, + OnlineTablesPreviewInterface: NewOnlineTablesPreview(databricksClient), + }, nil +} + +type QualityMonitorsPreviewClient struct { + QualityMonitorsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQualityMonitorsPreviewClient(cfg *config.Config) (*QualityMonitorsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QualityMonitorsPreviewClient{ + Config: cfg, + apiClient: apiClient, + QualityMonitorsPreviewInterface: NewQualityMonitorsPreview(databricksClient), + }, nil +} + +type RegisteredModelsPreviewClient struct { + RegisteredModelsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewRegisteredModelsPreviewClient(cfg *config.Config) (*RegisteredModelsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &RegisteredModelsPreviewClient{ + Config: cfg, + apiClient: apiClient, + RegisteredModelsPreviewInterface: NewRegisteredModelsPreview(databricksClient), + }, nil +} + +type ResourceQuotasPreviewClient struct { + ResourceQuotasPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewResourceQuotasPreviewClient(cfg *config.Config) (*ResourceQuotasPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + 
return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ResourceQuotasPreviewClient{ + Config: cfg, + apiClient: apiClient, + ResourceQuotasPreviewInterface: NewResourceQuotasPreview(databricksClient), + }, nil +} + +type SchemasPreviewClient struct { + SchemasPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSchemasPreviewClient(cfg *config.Config) (*SchemasPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SchemasPreviewClient{ + Config: cfg, + apiClient: apiClient, + SchemasPreviewInterface: NewSchemasPreview(databricksClient), + }, nil +} + +type StorageCredentialsPreviewClient struct { + StorageCredentialsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewStorageCredentialsPreviewClient(cfg *config.Config) (*StorageCredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &StorageCredentialsPreviewClient{ + Config: cfg, + apiClient: apiClient, + StorageCredentialsPreviewInterface: NewStorageCredentialsPreview(databricksClient), + }, nil +} + +type SystemSchemasPreviewClient struct { + SystemSchemasPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSystemSchemasPreviewClient(cfg *config.Config) (*SystemSchemasPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SystemSchemasPreviewClient{ + Config: cfg, + apiClient: apiClient, + SystemSchemasPreviewInterface: NewSystemSchemasPreview(databricksClient), + }, nil +} + +type TableConstraintsPreviewClient struct { + TableConstraintsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewTableConstraintsPreviewClient(cfg *config.Config) (*TableConstraintsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := 
client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &TableConstraintsPreviewClient{ + Config: cfg, + apiClient: apiClient, + TableConstraintsPreviewInterface: NewTableConstraintsPreview(databricksClient), + }, nil +} + +type TablesPreviewClient struct { + TablesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewTablesPreviewClient(cfg *config.Config) (*TablesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &TablesPreviewClient{ + Config: cfg, + apiClient: apiClient, + TablesPreviewInterface: NewTablesPreview(databricksClient), + }, nil +} + +type TemporaryTableCredentialsPreviewClient struct { + TemporaryTableCredentialsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewTemporaryTableCredentialsPreviewClient(cfg *config.Config) (*TemporaryTableCredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &TemporaryTableCredentialsPreviewClient{ + Config: cfg, + apiClient: apiClient, + TemporaryTableCredentialsPreviewInterface: NewTemporaryTableCredentialsPreview(databricksClient), + }, nil +} + +type VolumesPreviewClient struct { + VolumesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewVolumesPreviewClient(cfg *config.Config) (*VolumesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &VolumesPreviewClient{ + Config: cfg, + apiClient: apiClient, + VolumesPreviewInterface: NewVolumesPreview(databricksClient), + }, nil +} + +type WorkspaceBindingsPreviewClient struct { + WorkspaceBindingsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewWorkspaceBindingsPreviewClient(cfg *config.Config) (*WorkspaceBindingsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + 
return nil, err + } + + return &WorkspaceBindingsPreviewClient{ + Config: cfg, + apiClient: apiClient, + WorkspaceBindingsPreviewInterface: NewWorkspaceBindingsPreview(databricksClient), + }, nil +} diff --git a/catalog/v2preview/impl.go b/catalog/v2preview/impl.go new file mode 100755 index 000000000..e8ff2584b --- /dev/null +++ b/catalog/v2preview/impl.go @@ -0,0 +1,2111 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package catalogpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just AccountMetastoreAssignmentsPreview API methods +type accountMetastoreAssignmentsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountMetastoreAssignmentsPreviewImpl) Create(ctx context.Context, request AccountsCreateMetastoreAssignment) error { + var createResponse CreateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) + return err +} + +func (a *accountMetastoreAssignmentsPreviewImpl) Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountMetastoreAssignmentsPreviewImpl) Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error) { + var accountsMetastoreAssignment AccountsMetastoreAssignment + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastore", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsMetastoreAssignment) + return &accountsMetastoreAssignment, err +} + +// Get all workspaces assigned to a metastore. +// +// Gets a list of all Databricks workspace IDs that have been assigned to the given +// metastore. +func (a *accountMetastoreAssignmentsPreviewImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64] { + + getNextPage := func(ctx context.Context, req ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAccountMetastoreAssignmentsResponse) []int64 { + return resp.WorkspaceIds + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +}
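List above returns a lazy listing.Iterator rather than a slice, while ListAll below drains it eagerly. A consumption sketch, assuming the iterator exposes HasNext/Next as in the SDK's listing package, that account-level credentials (including DATABRICKS_ACCOUNT_ID and an accounts-console DATABRICKS_HOST) resolve from the environment, and that "abc-123" is a hypothetical metastore ID:

    package main

    import (
        "context"
        "fmt"

        catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
    )

    func main() {
        ctx := context.Background()
        a, err := catalogpreview.NewAccountMetastoreAssignmentsPreviewClient(nil)
        if err != nil {
            panic(err)
        }
        // Each item yielded by the iterator is an assigned workspace ID.
        it := a.List(ctx, catalogpreview.ListAccountMetastoreAssignmentsRequest{
            MetastoreId: "abc-123",
        })
        for it.HasNext(ctx) {
            workspaceID, err := it.Next(ctx)
            if err != nil {
                panic(err)
            }
            fmt.Println(workspaceID)
        }
    }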
+// Get all workspaces assigned to a metastore. +// +// Gets a list of all Databricks workspace IDs that have been assigned to the given +// metastore. +func (a *accountMetastoreAssignmentsPreviewImpl) ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[int64](ctx, iterator) +} +func (a *accountMetastoreAssignmentsPreviewImpl) internalList(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { + var listAccountMetastoreAssignmentsResponse ListAccountMetastoreAssignmentsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/workspaces", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAccountMetastoreAssignmentsResponse) + return &listAccountMetastoreAssignmentsResponse, err +} + +func (a *accountMetastoreAssignmentsPreviewImpl) Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just AccountMetastoresPreview API methods +type accountMetastoresPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountMetastoresPreviewImpl) Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error) { + var accountsMetastoreInfo AccountsMetastoreInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &accountsMetastoreInfo) + return &accountsMetastoreInfo, err +} + +func (a *accountMetastoresPreviewImpl) Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountMetastoresPreviewImpl) Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error) { + var accountsMetastoreInfo AccountsMetastoreInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsMetastoreInfo) + return &accountsMetastoreInfo, err +} + +// Get all metastores associated with an account.
+// +// Gets all Unity Catalog metastores associated with an account specified by ID. +func (a *accountMetastoresPreviewImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { + return resp.Metastores + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all metastores associated with an account. +// +// Gets all Unity Catalog metastores associated with an account specified by ID. +func (a *accountMetastoresPreviewImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[MetastoreInfo](ctx, iterator) +} +func (a *accountMetastoresPreviewImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { + var listMetastoresResponse ListMetastoresResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listMetastoresResponse) + return &listMetastoresResponse, err +} + +func (a *accountMetastoresPreviewImpl) Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error) { + var accountsMetastoreInfo AccountsMetastoreInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &accountsMetastoreInfo) + return &accountsMetastoreInfo, err +} + +// unexported type that holds implementations of just AccountStorageCredentialsPreview API methods +type accountStorageCredentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountStorageCredentialsPreviewImpl) Create(ctx context.Context, request AccountsCreateStorageCredential) (*AccountsStorageCredentialInfo, error) { + var accountsStorageCredentialInfo AccountsStorageCredentialInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &accountsStorageCredentialInfo) + return &accountsStorageCredentialInfo, err +} + +func (a *accountStorageCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountStorageCredentialsPreviewImpl) Get(ctx context.Context, request 
GetAccountStorageCredentialRequest) (*AccountsStorageCredentialInfo, error) { + var accountsStorageCredentialInfo AccountsStorageCredentialInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsStorageCredentialInfo) + return &accountsStorageCredentialInfo, err +} + +// Get all storage credentials assigned to a metastore. +// +// Gets a list of all storage credentials that have been assigned to the given +// metastore. +func (a *accountStorageCredentialsPreviewImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + + getNextPage := func(ctx context.Context, req ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAccountStorageCredentialsResponse) []StorageCredentialInfo { + return resp.StorageCredentials + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all storage credentials assigned to a metastore. +// +// Gets a list of all storage credentials that have been assigned to the given +// metastore. +func (a *accountStorageCredentialsPreviewImpl) ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[StorageCredentialInfo](ctx, iterator) +} +func (a *accountStorageCredentialsPreviewImpl) internalList(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { + var listAccountStorageCredentialsResponse ListAccountStorageCredentialsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAccountStorageCredentialsResponse) + return &listAccountStorageCredentialsResponse, err +} + +func (a *accountStorageCredentialsPreviewImpl) Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error) { + var accountsStorageCredentialInfo AccountsStorageCredentialInfo + path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &accountsStorageCredentialInfo) + return &accountsStorageCredentialInfo, err +} + +// unexported type that holds implementations of just ArtifactAllowlistsPreview API methods +type artifactAllowlistsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *artifactAllowlistsPreviewImpl) Get(ctx context.Context, request GetArtifactAllowlistRequest) (*ArtifactAllowlistInfo, error) { + var artifactAllowlistInfo
ArtifactAllowlistInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/artifact-allowlists/%v", request.ArtifactType) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &artifactAllowlistInfo) + return &artifactAllowlistInfo, err +} + +func (a *artifactAllowlistsPreviewImpl) Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error) { + var artifactAllowlistInfo ArtifactAllowlistInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/artifact-allowlists/%v", request.ArtifactType) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &artifactAllowlistInfo) + return &artifactAllowlistInfo, err +} + +// unexported type that holds implementations of just CatalogsPreview API methods +type catalogsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *catalogsPreviewImpl) Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error) { + var catalogInfo CatalogInfo + path := "/api/2.1preview/unity-catalog/catalogs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &catalogInfo) + return &catalogInfo, err +} + +func (a *catalogsPreviewImpl) Delete(ctx context.Context, request DeleteCatalogRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *catalogsPreviewImpl) Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error) { + var catalogInfo CatalogInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &catalogInfo) + return &catalogInfo, err +}
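The List method that follows pages through catalogs by feeding each response's NextPageToken back into the request's PageToken; ListAll drains that iterator into a single slice. A short sketch of the eager form, assuming workspace credentials resolve from the environment:

    package main

    import (
        "context"
        "fmt"

        catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
    )

    func main() {
        ctx := context.Background()
        c, err := catalogpreview.NewCatalogsPreviewClient(nil)
        if err != nil {
            panic(err)
        }
        // ListAll walks every page; an empty request lists all catalogs
        // visible to the caller.
        all, err := c.ListAll(ctx, catalogpreview.ListCatalogsRequest{})
        if err != nil {
            panic(err)
        }
        for _, info := range all {
            fmt.Println(info.Name)
        }
    }

+// List catalogs. +// +// Gets an array of catalogs in the metastore. If the caller is the metastore +// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the +// caller (or for which the caller has the **USE_CATALOG** privilege) will be +// retrieved. There is no guarantee of a specific ordering of the elements in +// the array.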
+func (a *catalogsPreviewImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { + + getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCatalogsResponse) []CatalogInfo { + return resp.Catalogs + } + getNextReq := func(resp *ListCatalogsResponse) *ListCatalogsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List catalogs. +// +// Gets an array of catalogs in the metastore. If the caller is the metastore +// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the +// caller (or for which the caller has the **USE_CATALOG** privilege) will be +// retrieved. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *catalogsPreviewImpl) ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CatalogInfo](ctx, iterator) +} +func (a *catalogsPreviewImpl) internalList(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { + var listCatalogsResponse ListCatalogsResponse + path := "/api/2.1preview/unity-catalog/catalogs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCatalogsResponse) + return &listCatalogsResponse, err +} + +func (a *catalogsPreviewImpl) Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error) { + var catalogInfo CatalogInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &catalogInfo) + return &catalogInfo, err +} + +// unexported type that holds implementations of just ConnectionsPreview API methods +type connectionsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *connectionsPreviewImpl) Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error) { + var connectionInfo ConnectionInfo + path := "/api/2.1preview/unity-catalog/connections" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &connectionInfo) + return &connectionInfo, err +} + +func (a *connectionsPreviewImpl) Delete(ctx context.Context, request DeleteConnectionRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *connectionsPreviewImpl) Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error) { + var connectionInfo ConnectionInfo + path := 
fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &connectionInfo) + return &connectionInfo, err +} + +// List connections. +// +// List all connections. +func (a *connectionsPreviewImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { + + getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListConnectionsResponse) []ConnectionInfo { + return resp.Connections + } + getNextReq := func(resp *ListConnectionsResponse) *ListConnectionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List connections. +// +// List all connections. +func (a *connectionsPreviewImpl) ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ConnectionInfo](ctx, iterator) +} +func (a *connectionsPreviewImpl) internalList(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { + var listConnectionsResponse ListConnectionsResponse + path := "/api/2.1preview/unity-catalog/connections" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listConnectionsResponse) + return &listConnectionsResponse, err +} + +func (a *connectionsPreviewImpl) Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error) { + var connectionInfo ConnectionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &connectionInfo) + return &connectionInfo, err +} + +// unexported type that holds implementations of just CredentialsPreview API methods +type credentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *credentialsPreviewImpl) CreateCredential(ctx context.Context, request CreateCredentialRequest) (*CredentialInfo, error) { + var credentialInfo CredentialInfo + path := "/api/2.1preview/unity-catalog/credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &credentialInfo) + return &credentialInfo, err +} + +func (a *credentialsPreviewImpl) DeleteCredential(ctx context.Context, request DeleteCredentialRequest) error { + var deleteCredentialResponse DeleteCredentialResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, 
&deleteCredentialResponse) + return err +} + +func (a *credentialsPreviewImpl) GenerateTemporaryServiceCredential(ctx context.Context, request GenerateTemporaryServiceCredentialRequest) (*TemporaryCredentials, error) { + var temporaryCredentials TemporaryCredentials + path := "/api/2.1preview/unity-catalog/temporary-service-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &temporaryCredentials) + return &temporaryCredentials, err +} + +func (a *credentialsPreviewImpl) GetCredential(ctx context.Context, request GetCredentialRequest) (*CredentialInfo, error) { + var credentialInfo CredentialInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &credentialInfo) + return &credentialInfo, err +} + +// List credentials. +// +// Gets an array of credentials (as __CredentialInfo__ objects). +// +// The array is limited to only the credentials that the caller has permission +// to access. If the caller is a metastore admin, retrieval of credentials is +// unrestricted. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *credentialsPreviewImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo] { + + getNextPage := func(ctx context.Context, req ListCredentialsRequest) (*ListCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCredentials(ctx, req) + } + getItems := func(resp *ListCredentialsResponse) []CredentialInfo { + return resp.Credentials + } + getNextReq := func(resp *ListCredentialsResponse) *ListCredentialsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +}
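A sketch of minting short-lived credentials with GenerateTemporaryServiceCredential above; the CredentialName request field and ExpirationTime response field are assumptions about the generated model, and "my-service-credential" is a hypothetical credential name.

    package main

    import (
        "context"
        "fmt"

        catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
    )

    func main() {
        ctx := context.Background()
        creds, err := catalogpreview.NewCredentialsPreviewClient(nil)
        if err != nil {
            panic(err)
        }
        // Exchange a named service credential for temporary cloud credentials;
        // the field names here are assumptions about the generated model.
        tc, err := creds.GenerateTemporaryServiceCredential(ctx,
            catalogpreview.GenerateTemporaryServiceCredentialRequest{
                CredentialName: "my-service-credential",
            })
        if err != nil {
            panic(err)
        }
        fmt.Println("temporary credentials expire at:", tc.ExpirationTime)
    }

+// List credentials. +// +// Gets an array of credentials (as __CredentialInfo__ objects). +// +// The array is limited to only the credentials that the caller has permission +// to access. If the caller is a metastore admin, retrieval of credentials is +// unrestricted. There is no guarantee of a specific ordering of the elements in +// the array.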
+func (a *credentialsPreviewImpl) ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error) { + iterator := a.ListCredentials(ctx, request) + return listing.ToSlice[CredentialInfo](ctx, iterator) +} +func (a *credentialsPreviewImpl) internalListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { + var listCredentialsResponse ListCredentialsResponse + path := "/api/2.1preview/unity-catalog/credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCredentialsResponse) + return &listCredentialsResponse, err +} + +func (a *credentialsPreviewImpl) UpdateCredential(ctx context.Context, request UpdateCredentialRequest) (*CredentialInfo, error) { + var credentialInfo CredentialInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &credentialInfo) + return &credentialInfo, err +} + +func (a *credentialsPreviewImpl) ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error) { + var validateCredentialResponse ValidateCredentialResponse + path := "/api/2.1preview/unity-catalog/validate-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &validateCredentialResponse) + return &validateCredentialResponse, err +} + +// unexported type that holds implementations of just ExternalLocationsPreview API methods +type externalLocationsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *externalLocationsPreviewImpl) Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error) { + var externalLocationInfo ExternalLocationInfo + path := "/api/2.1preview/unity-catalog/external-locations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &externalLocationInfo) + return &externalLocationInfo, err +} + +func (a *externalLocationsPreviewImpl) Delete(ctx context.Context, request DeleteExternalLocationRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *externalLocationsPreviewImpl) Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error) { + var externalLocationInfo ExternalLocationInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, 
queryParams, request, &externalLocationInfo) + return &externalLocationInfo, err +} + +// List external locations. +// +// Gets an array of external locations (__ExternalLocationInfo__ objects) from +// the metastore. The caller must be a metastore admin, the owner of the +// external location, or a user that has some privilege on the external +// location. There is no guarantee of a specific ordering of the elements in the +// array. +func (a *externalLocationsPreviewImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { + + getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListExternalLocationsResponse) []ExternalLocationInfo { + return resp.ExternalLocations + } + getNextReq := func(resp *ListExternalLocationsResponse) *ListExternalLocationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List external locations. +// +// Gets an array of external locations (__ExternalLocationInfo__ objects) from +// the metastore. The caller must be a metastore admin, the owner of the +// external location, or a user that has some privilege on the external +// location. There is no guarantee of a specific ordering of the elements in the +// array. +func (a *externalLocationsPreviewImpl) ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ExternalLocationInfo](ctx, iterator) +} +func (a *externalLocationsPreviewImpl) internalList(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { + var listExternalLocationsResponse ListExternalLocationsResponse + path := "/api/2.1preview/unity-catalog/external-locations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExternalLocationsResponse) + return &listExternalLocationsResponse, err +} + +func (a *externalLocationsPreviewImpl) Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) { + var externalLocationInfo ExternalLocationInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &externalLocationInfo) + return &externalLocationInfo, err +} + +// unexported type that holds implementations of just FunctionsPreview API methods +type functionsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *functionsPreviewImpl) Create(ctx context.Context, request CreateFunctionRequest) (*FunctionInfo, error) { + var functionInfo FunctionInfo + path := "/api/2.1preview/unity-catalog/functions" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, 
queryParams, request, &functionInfo) + return &functionInfo, err +} + +func (a *functionsPreviewImpl) Delete(ctx context.Context, request DeleteFunctionRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *functionsPreviewImpl) Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error) { + var functionInfo FunctionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &functionInfo) + return &functionInfo, err +} + +// List functions. +// +// List functions within the specified parent catalog and schema. If the user is +// a metastore admin, all functions are returned in the output list. Otherwise, +// the user must have the **USE_CATALOG** privilege on the catalog and the +// **USE_SCHEMA** privilege on the schema, and the output list contains only +// functions for which either the user has the **EXECUTE** privilege or the user +// is the owner. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *functionsPreviewImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { + + getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFunctionsResponse) []FunctionInfo { + return resp.Functions + } + getNextReq := func(resp *ListFunctionsResponse) *ListFunctionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List functions. +// +// List functions within the specified parent catalog and schema. If the user is +// a metastore admin, all functions are returned in the output list. Otherwise, +// the user must have the **USE_CATALOG** privilege on the catalog and the +// **USE_SCHEMA** privilege on the schema, and the output list contains only +// functions for which either the user has the **EXECUTE** privilege or the user +// is the owner. There is no guarantee of a specific ordering of the elements in +// the array. 
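+//
+// Illustrative sketch (not generator output) of draining the listing into a
+// slice; "api" stands for a configured functionsPreviewImpl, and the
+// catalog/schema field names are assumed from the stable catalog API:
+//
+//	fns, err := api.ListAll(ctx, ListFunctionsRequest{
+//		CatalogName: "main",    // assumed field, placeholder value
+//		SchemaName:  "default", // assumed field, placeholder value
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, fn := range fns {
+//		fmt.Println(fn.FullName) // FullName assumed from the stable FunctionInfo
+//	}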
+func (a *functionsPreviewImpl) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FunctionInfo](ctx, iterator) +} +func (a *functionsPreviewImpl) internalList(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { + var listFunctionsResponse ListFunctionsResponse + path := "/api/2.1preview/unity-catalog/functions" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFunctionsResponse) + return &listFunctionsResponse, err +} + +func (a *functionsPreviewImpl) Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error) { + var functionInfo FunctionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &functionInfo) + return &functionInfo, err +} + +// unexported type that holds implementations of just GrantsPreview API methods +type grantsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *grantsPreviewImpl) Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error) { + var permissionsList PermissionsList + path := fmt.Sprintf("/api/2.1preview/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionsList) + return &permissionsList, err +} + +func (a *grantsPreviewImpl) GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error) { + var effectivePermissionsList EffectivePermissionsList + path := fmt.Sprintf("/api/2.1preview/unity-catalog/effective-permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &effectivePermissionsList) + return &effectivePermissionsList, err +} + +func (a *grantsPreviewImpl) Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error) { + var permissionsList PermissionsList + path := fmt.Sprintf("/api/2.1preview/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &permissionsList) + return &permissionsList, err +} + +// unexported type that holds implementations of just MetastoresPreview API methods +type metastoresPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *metastoresPreviewImpl) Assign(ctx context.Context, request CreateMetastoreAssignment) error { + var assignResponse AssignResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + 
headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &assignResponse) + return err +} + +func (a *metastoresPreviewImpl) Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error) { + var metastoreInfo MetastoreInfo + path := "/api/2.1preview/unity-catalog/metastores" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &metastoreInfo) + return &metastoreInfo, err +} + +func (a *metastoresPreviewImpl) Current(ctx context.Context) (*MetastoreAssignment, error) { + var metastoreAssignment MetastoreAssignment + path := "/api/2.1preview/unity-catalog/current-metastore-assignment" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &metastoreAssignment) + return &metastoreAssignment, err +} + +func (a *metastoresPreviewImpl) Delete(ctx context.Context, request DeleteMetastoreRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *metastoresPreviewImpl) Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error) { + var metastoreInfo MetastoreInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &metastoreInfo) + return &metastoreInfo, err +} + +// List metastores. +// +// Gets an array of the available metastores (as __MetastoreInfo__ objects). The +// caller must be an admin to retrieve this info. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *metastoresPreviewImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { + return resp.Metastores + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List metastores. +// +// Gets an array of the available metastores (as __MetastoreInfo__ objects). The +// caller must be an admin to retrieve this info. There is no guarantee of a +// specific ordering of the elements in the array. 
+func (a *metastoresPreviewImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[MetastoreInfo](ctx, iterator) +} +func (a *metastoresPreviewImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { + var listMetastoresResponse ListMetastoresResponse + path := "/api/2.1preview/unity-catalog/metastores" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listMetastoresResponse) + return &listMetastoresResponse, err +} + +func (a *metastoresPreviewImpl) Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error) { + var getMetastoreSummaryResponse GetMetastoreSummaryResponse + path := "/api/2.1preview/unity-catalog/metastore_summary" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getMetastoreSummaryResponse) + return &getMetastoreSummaryResponse, err +} + +func (a *metastoresPreviewImpl) Unassign(ctx context.Context, request UnassignRequest) error { + var unassignResponse UnassignResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &unassignResponse) + return err +} + +func (a *metastoresPreviewImpl) Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error) { + var metastoreInfo MetastoreInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &metastoreInfo) + return &metastoreInfo, err +} + +func (a *metastoresPreviewImpl) UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error { + var updateAssignmentResponse UpdateAssignmentResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateAssignmentResponse) + return err +} + +// unexported type that holds implementations of just ModelVersionsPreview API methods +type modelVersionsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *modelVersionsPreviewImpl) Delete(ctx context.Context, request DeleteModelVersionRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *modelVersionsPreviewImpl) Get(ctx context.Context, request GetModelVersionRequest) (*ModelVersionInfo, error) { + var modelVersionInfo ModelVersionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) + headers := 
make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &modelVersionInfo) + return &modelVersionInfo, err +} + +func (a *modelVersionsPreviewImpl) GetByAlias(ctx context.Context, request GetByAliasRequest) (*ModelVersionInfo, error) { + var modelVersionInfo ModelVersionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &modelVersionInfo) + return &modelVersionInfo, err +} + +// List Model Versions. +// +// List model versions. You can list model versions under a particular schema, +// or list all model versions in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the model versions. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// parent registered model to receive the model versions in the response. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. +// The elements in the response will not contain any aliases or tags. +func (a *modelVersionsPreviewImpl) List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo] { + + getNextPage := func(ctx context.Context, req ListModelVersionsRequest) (*ListModelVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListModelVersionsResponse) []ModelVersionInfo { + return resp.ModelVersions + } + getNextReq := func(resp *ListModelVersionsResponse) *ListModelVersionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Model Versions. +// +// List model versions. You can list model versions under a particular schema, +// or list all model versions in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the model versions. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// parent registered model to receive the model versions in the response. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. +// The elements in the response will not contain any aliases or tags.
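+//
+// Illustrative sketch (not generator output); "api" stands for a configured
+// modelVersionsPreviewImpl, FullName follows the routing used by internalList
+// below, and the model name is a placeholder:
+//
+//	versions, err := api.ListAll(ctx, ListModelVersionsRequest{
+//		FullName: "main.default.my_model",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(versions))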
+func (a *modelVersionsPreviewImpl) ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ModelVersionInfo](ctx, iterator) +} +func (a *modelVersionsPreviewImpl) internalList(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { + var listModelVersionsResponse ListModelVersionsResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listModelVersionsResponse) + return &listModelVersionsResponse, err +} + +func (a *modelVersionsPreviewImpl) Update(ctx context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error) { + var modelVersionInfo ModelVersionInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &modelVersionInfo) + return &modelVersionInfo, err +} + +// unexported type that holds implementations of just OnlineTablesPreview API methods +type onlineTablesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *onlineTablesPreviewImpl) Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error) { + var onlineTable OnlineTable + path := "/api/2.0preview/online-tables" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Table, &onlineTable) + return &onlineTable, err +} + +func (a *onlineTablesPreviewImpl) Delete(ctx context.Context, request DeleteOnlineTableRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/online-tables/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *onlineTablesPreviewImpl) Get(ctx context.Context, request GetOnlineTableRequest) (*OnlineTable, error) { + var onlineTable OnlineTable + path := fmt.Sprintf("/api/2.0preview/online-tables/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &onlineTable) + return &onlineTable, err +} + +// unexported type that holds implementations of just QualityMonitorsPreview API methods +type qualityMonitorsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *qualityMonitorsPreviewImpl) CancelRefresh(ctx context.Context, request CancelRefreshRequest) error { + var cancelRefreshResponse CancelRefreshResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes/%v/cancel", request.TableName, request.RefreshId) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, 
&cancelRefreshResponse) + return err +} + +func (a *qualityMonitorsPreviewImpl) Create(ctx context.Context, request CreateMonitor) (*MonitorInfo, error) { + var monitorInfo MonitorInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &monitorInfo) + return &monitorInfo, err +} + +func (a *qualityMonitorsPreviewImpl) Delete(ctx context.Context, request DeleteQualityMonitorRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *qualityMonitorsPreviewImpl) Get(ctx context.Context, request GetQualityMonitorRequest) (*MonitorInfo, error) { + var monitorInfo MonitorInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorInfo) + return &monitorInfo, err +} + +func (a *qualityMonitorsPreviewImpl) GetRefresh(ctx context.Context, request GetRefreshRequest) (*MonitorRefreshInfo, error) { + var monitorRefreshInfo MonitorRefreshInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes/%v", request.TableName, request.RefreshId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorRefreshInfo) + return &monitorRefreshInfo, err +} + +func (a *qualityMonitorsPreviewImpl) ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) { + var monitorRefreshListResponse MonitorRefreshListResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorRefreshListResponse) + return &monitorRefreshListResponse, err +} + +func (a *qualityMonitorsPreviewImpl) RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) { + var regenerateDashboardResponse RegenerateDashboardResponse + path := fmt.Sprintf("/api/2.1preview/quality-monitoring/tables/%v/monitor/dashboard", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &regenerateDashboardResponse) + return &regenerateDashboardResponse, err +} + +func (a *qualityMonitorsPreviewImpl) RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) { + var monitorRefreshInfo MonitorRefreshInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes", request.TableName) + queryParams :=
make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &monitorRefreshInfo) + return &monitorRefreshInfo, err +} + +func (a *qualityMonitorsPreviewImpl) Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error) { + var monitorInfo MonitorInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &monitorInfo) + return &monitorInfo, err +} + +// unexported type that holds implementations of just RegisteredModelsPreview API methods +type registeredModelsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *registeredModelsPreviewImpl) Create(ctx context.Context, request CreateRegisteredModelRequest) (*RegisteredModelInfo, error) { + var registeredModelInfo RegisteredModelInfo + path := "/api/2.1preview/unity-catalog/models" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &registeredModelInfo) + return &registeredModelInfo, err +} + +func (a *registeredModelsPreviewImpl) Delete(ctx context.Context, request DeleteRegisteredModelRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *registeredModelsPreviewImpl) DeleteAlias(ctx context.Context, request DeleteAliasRequest) error { + var deleteAliasResponse DeleteAliasResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAliasResponse) + return err +} + +func (a *registeredModelsPreviewImpl) Get(ctx context.Context, request GetRegisteredModelRequest) (*RegisteredModelInfo, error) { + var registeredModelInfo RegisteredModelInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &registeredModelInfo) + return &registeredModelInfo, err +} + +// List Registered Models. +// +// List registered models. You can list registered models under a particular +// schema, or list all registered models in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the registered models. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// registered model to receive the registered models in the response. For the +// latter case, the caller must also be the owner or have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema.
+// +// There is no guarantee of a specific ordering of the elements in the response. +func (a *registeredModelsPreviewImpl) List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo] { + + getNextPage := func(ctx context.Context, req ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListRegisteredModelsResponse) []RegisteredModelInfo { + return resp.RegisteredModels + } + getNextReq := func(resp *ListRegisteredModelsResponse) *ListRegisteredModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Registered Models. +// +// List registered models. You can list registered models under a particular +// schema, or list all registered models in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the registered models. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// registered model to receive the registered models in the response. For the +// latter case, the caller must also be the owner or have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. +func (a *registeredModelsPreviewImpl) ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RegisteredModelInfo](ctx, iterator) +} +func (a *registeredModelsPreviewImpl) internalList(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { + var listRegisteredModelsResponse ListRegisteredModelsResponse + path := "/api/2.1preview/unity-catalog/models" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRegisteredModelsResponse) + return &listRegisteredModelsResponse, err +} + +func (a *registeredModelsPreviewImpl) SetAlias(ctx context.Context, request SetRegisteredModelAliasRequest) (*RegisteredModelAlias, error) { + var registeredModelAlias RegisteredModelAlias + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &registeredModelAlias) + return &registeredModelAlias, err +} + +func (a *registeredModelsPreviewImpl) Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error) { + var registeredModelInfo RegisteredModelInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &registeredModelInfo) + return
&registeredModelInfo, err +} + +// unexported type that holds implementations of just ResourceQuotasPreview API methods +type resourceQuotasPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *resourceQuotasPreviewImpl) GetQuota(ctx context.Context, request GetQuotaRequest) (*GetQuotaResponse, error) { + var getQuotaResponse GetQuotaResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/resource-quotas/%v/%v/%v", request.ParentSecurableType, request.ParentFullName, request.QuotaName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getQuotaResponse) + return &getQuotaResponse, err +} + +// List all resource quotas under a metastore. +// +// ListQuotas returns all quota values under the metastore. There are no SLAs on +// the freshness of the counts returned. This API does not trigger a refresh of +// quota counts. +func (a *resourceQuotasPreviewImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] { + + getNextPage := func(ctx context.Context, req ListQuotasRequest) (*ListQuotasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListQuotas(ctx, req) + } + getItems := func(resp *ListQuotasResponse) []QuotaInfo { + return resp.Quotas + } + getNextReq := func(resp *ListQuotasResponse) *ListQuotasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all resource quotas under a metastore. +// +// ListQuotas returns all quota values under the metastore. There are no SLAs on +// the freshness of the counts returned. This API does not trigger a refresh of +// quota counts.
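+//
+// Illustrative sketch (not generator output); "api" stands for a configured
+// resourceQuotasPreviewImpl, and the empty request is assumed to list all
+// quotas in the metastore, with paging handled by the iterator machinery:
+//
+//	quotas, err := api.ListQuotasAll(ctx, ListQuotasRequest{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(quotas)) // QuotaInfo fields are omitted here; they vary by quota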
+func (a *resourceQuotasPreviewImpl) ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) { + iterator := a.ListQuotas(ctx, request) + return listing.ToSlice[QuotaInfo](ctx, iterator) +} +func (a *resourceQuotasPreviewImpl) internalListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { + var listQuotasResponse ListQuotasResponse + path := "/api/2.1preview/unity-catalog/resource-quotas/all-resource-quotas" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQuotasResponse) + return &listQuotasResponse, err +} + +// unexported type that holds implementations of just SchemasPreview API methods +type schemasPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *schemasPreviewImpl) Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error) { + var schemaInfo SchemaInfo + path := "/api/2.1preview/unity-catalog/schemas" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &schemaInfo) + return &schemaInfo, err +} + +func (a *schemasPreviewImpl) Delete(ctx context.Context, request DeleteSchemaRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *schemasPreviewImpl) Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error) { + var schemaInfo SchemaInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &schemaInfo) + return &schemaInfo, err +} + +// List schemas. +// +// Gets an array of schemas for a catalog in the metastore. If the caller is the +// metastore admin or the owner of the parent catalog, all schemas for the +// catalog will be retrieved. Otherwise, only schemas owned by the caller (or +// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. +// There is no guarantee of a specific ordering of the elements in the array. +func (a *schemasPreviewImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { + + getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSchemasResponse) []SchemaInfo { + return resp.Schemas + } + getNextReq := func(resp *ListSchemasResponse) *ListSchemasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List schemas. +// +// Gets an array of schemas for a catalog in the metastore. 
If the caller is the +// metastore admin or the owner of the parent catalog, all schemas for the +// catalog will be retrieved. Otherwise, only schemas owned by the caller (or +// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. +// There is no guarantee of a specific ordering of the elements in the array. +func (a *schemasPreviewImpl) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SchemaInfo](ctx, iterator) +} +func (a *schemasPreviewImpl) internalList(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { + var listSchemasResponse ListSchemasResponse + path := "/api/2.1preview/unity-catalog/schemas" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSchemasResponse) + return &listSchemasResponse, err +} + +func (a *schemasPreviewImpl) Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error) { + var schemaInfo SchemaInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &schemaInfo) + return &schemaInfo, err +} + +// unexported type that holds implementations of just StorageCredentialsPreview API methods +type storageCredentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *storageCredentialsPreviewImpl) Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error) { + var storageCredentialInfo StorageCredentialInfo + path := "/api/2.1preview/unity-catalog/storage-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &storageCredentialInfo) + return &storageCredentialInfo, err +} + +func (a *storageCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteStorageCredentialRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *storageCredentialsPreviewImpl) Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error) { + var storageCredentialInfo StorageCredentialInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &storageCredentialInfo) + return &storageCredentialInfo, err +} + +// List credentials. +// +// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). +// The array is limited to only those storage credentials the caller has +// permission to access. If the caller is a metastore admin, retrieval of +// credentials is unrestricted. 
There is no guarantee of a specific ordering of +// the elements in the array. +func (a *storageCredentialsPreviewImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + + getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListStorageCredentialsResponse) []StorageCredentialInfo { + return resp.StorageCredentials + } + getNextReq := func(resp *ListStorageCredentialsResponse) *ListStorageCredentialsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List credentials. +// +// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). +// The array is limited to only those storage credentials the caller has +// permission to access. If the caller is a metastore admin, retrieval of +// credentials is unrestricted. There is no guarantee of a specific ordering of +// the elements in the array. +func (a *storageCredentialsPreviewImpl) ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[StorageCredentialInfo](ctx, iterator) +} +func (a *storageCredentialsPreviewImpl) internalList(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { + var listStorageCredentialsResponse ListStorageCredentialsResponse + path := "/api/2.1preview/unity-catalog/storage-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listStorageCredentialsResponse) + return &listStorageCredentialsResponse, err +} + +func (a *storageCredentialsPreviewImpl) Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error) { + var storageCredentialInfo StorageCredentialInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &storageCredentialInfo) + return &storageCredentialInfo, err +} + +func (a *storageCredentialsPreviewImpl) Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error) { + var validateStorageCredentialResponse ValidateStorageCredentialResponse + path := "/api/2.1preview/unity-catalog/validate-storage-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &validateStorageCredentialResponse) + return &validateStorageCredentialResponse, err +} + +// unexported type that holds implementations of just SystemSchemasPreview API methods +type systemSchemasPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *systemSchemasPreviewImpl) Disable(ctx context.Context, request DisableRequest) error { + var 
disableResponse DisableResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &disableResponse) + return err +} + +func (a *systemSchemasPreviewImpl) Enable(ctx context.Context, request EnableRequest) error { + var enableResponse EnableResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, nil, &enableResponse) + return err +} + +// List system schemas. +// +// Gets an array of system schemas for a metastore. The caller must be an +// account admin or a metastore admin. +func (a *systemSchemasPreviewImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { + + getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSystemSchemasResponse) []SystemSchemaInfo { + return resp.Schemas + } + getNextReq := func(resp *ListSystemSchemasResponse) *ListSystemSchemasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List system schemas. +// +// Gets an array of system schemas for a metastore. The caller must be an +// account admin or a metastore admin. 
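+//
+// Illustrative sketch (not generator output); "api" stands for a configured
+// systemSchemasPreviewImpl, MetastoreId matches the route parameter used by
+// internalList below, and the value is a placeholder:
+//
+//	schemas, err := api.ListAll(ctx, ListSystemSchemasRequest{
+//		MetastoreId: "12a345b6-7890-1cd2-3ef4-a5b678c9d0e1",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, s := range schemas {
+//		fmt.Println(s.Schema) // Schema field assumed from the stable API
+//	}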
+func (a *systemSchemasPreviewImpl) ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SystemSchemaInfo](ctx, iterator) +} +func (a *systemSchemasPreviewImpl) internalList(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { + var listSystemSchemasResponse ListSystemSchemasResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas", request.MetastoreId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSystemSchemasResponse) + return &listSystemSchemasResponse, err +} + +// unexported type that holds implementations of just TableConstraintsPreview API methods +type tableConstraintsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *tableConstraintsPreviewImpl) Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error) { + var tableConstraint TableConstraint + path := "/api/2.1preview/unity-catalog/constraints" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &tableConstraint) + return &tableConstraint, err +} + +func (a *tableConstraintsPreviewImpl) Delete(ctx context.Context, request DeleteTableConstraintRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/constraints/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +// unexported type that holds implementations of just TablesPreview API methods +type tablesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *tablesPreviewImpl) Delete(ctx context.Context, request DeleteTableRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *tablesPreviewImpl) Exists(ctx context.Context, request ExistsRequest) (*TableExistsResponse, error) { + var tableExistsResponse TableExistsResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/exists", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &tableExistsResponse) + return &tableExistsResponse, err +} + +func (a *tablesPreviewImpl) Get(ctx context.Context, request GetTableRequest) (*TableInfo, error) { + var tableInfo TableInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &tableInfo) + return &tableInfo, err +} + +// List tables. 
+// +// Gets an array of all tables for the current metastore under the parent +// catalog and schema. The caller must be a metastore admin or an owner of (or +// have the **SELECT** privilege on) the table. For the latter case, the caller +// must also be the owner or have the **USE_CATALOG** privilege on the parent +// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no +// guarantee of a specific ordering of the elements in the array. +func (a *tablesPreviewImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { + + getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListTablesResponse) []TableInfo { + return resp.Tables + } + getNextReq := func(resp *ListTablesResponse) *ListTablesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List tables. +// +// Gets an array of all tables for the current metastore under the parent +// catalog and schema. The caller must be a metastore admin or an owner of (or +// have the **SELECT** privilege on) the table. For the latter case, the caller +// must also be the owner or have the **USE_CATALOG** privilege on the parent +// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no +// guarantee of a specific ordering of the elements in the array. +func (a *tablesPreviewImpl) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[TableInfo](ctx, iterator) +} +func (a *tablesPreviewImpl) internalList(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { + var listTablesResponse ListTablesResponse + path := "/api/2.1preview/unity-catalog/tables" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTablesResponse) + return &listTablesResponse, err +} + +// List table summaries. +// +// Gets an array of summaries for tables for a schema and catalog within the +// metastore. The table summaries returned are either: +// +// * summaries for tables (within the current metastore and parent catalog and +// schema), when the user is a metastore admin, or: * summaries for tables and +// schemas (within the current metastore and parent catalog) for which the user +// has ownership or the **SELECT** privilege on the table and ownership or +// **USE_SCHEMA** privilege on the schema, provided that the user also has +// ownership or the **USE_CATALOG** privilege on the parent catalog. +// +// There is no guarantee of a specific ordering of the elements in the array. 
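+//
+// Illustrative sketch (not generator output) of consuming the returned
+// listing.Iterator directly; HasNext/Next are the iterator's methods, "api"
+// stands for a configured tablesPreviewImpl, and CatalogName is assumed from
+// the stable catalog API:
+//
+//	it := api.ListSummaries(ctx, ListSummariesRequest{CatalogName: "main"})
+//	for it.HasNext(ctx) {
+//		summary, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(summary.FullName) // FullName assumed from the stable TableSummary
+//	}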
+func (a *tablesPreviewImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] { + + getNextPage := func(ctx context.Context, req ListSummariesRequest) (*ListTableSummariesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSummaries(ctx, req) + } + getItems := func(resp *ListTableSummariesResponse) []TableSummary { + return resp.Tables + } + getNextReq := func(resp *ListTableSummariesResponse) *ListSummariesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List table summaries. +// +// Gets an array of summaries for tables for a schema and catalog within the +// metastore. The table summaries returned are either: +// +// * summaries for tables (within the current metastore and parent catalog and +// schema), when the user is a metastore admin, or: * summaries for tables and +// schemas (within the current metastore and parent catalog) for which the user +// has ownership or the **SELECT** privilege on the table and ownership or +// **USE_SCHEMA** privilege on the schema, provided that the user also has +// ownership or the **USE_CATALOG** privilege on the parent catalog. +// +// There is no guarantee of a specific ordering of the elements in the array. +func (a *tablesPreviewImpl) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) { + iterator := a.ListSummaries(ctx, request) + return listing.ToSlice[TableSummary](ctx, iterator) +} +func (a *tablesPreviewImpl) internalListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { + var listTableSummariesResponse ListTableSummariesResponse + path := "/api/2.1preview/unity-catalog/table-summaries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTableSummariesResponse) + return &listTableSummariesResponse, err +} + +func (a *tablesPreviewImpl) Update(ctx context.Context, request UpdateTableRequest) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just TemporaryTableCredentialsPreview API methods +type temporaryTableCredentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *temporaryTableCredentialsPreviewImpl) GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) { + var generateTemporaryTableCredentialResponse GenerateTemporaryTableCredentialResponse + path := "/api/2.0preview/unity-catalog/temporary-table-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &generateTemporaryTableCredentialResponse) + return 
&generateTemporaryTableCredentialResponse, err +} + +// unexported type that holds implementations of just VolumesPreview API methods +type volumesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *volumesPreviewImpl) Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error) { + var volumeInfo VolumeInfo + path := "/api/2.1preview/unity-catalog/volumes" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &volumeInfo) + return &volumeInfo, err +} + +func (a *volumesPreviewImpl) Delete(ctx context.Context, request DeleteVolumeRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +// List Volumes. +// +// Gets an array of volumes for the current metastore under the parent catalog +// and schema. +// +// The returned volumes are filtered based on the privileges of the calling +// user. For example, the metastore admin is able to list all the volumes. A +// regular user needs to be the owner or have the **READ VOLUME** privilege on +// the volume to receive the volumes in the response. For the latter case, the +// caller must also be the owner or have the **USE_CATALOG** privilege on the +// parent catalog and the **USE_SCHEMA** privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the array. +func (a *volumesPreviewImpl) List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] { + + getNextPage := func(ctx context.Context, req ListVolumesRequest) (*ListVolumesResponseContent, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListVolumesResponseContent) []VolumeInfo { + return resp.Volumes + } + getNextReq := func(resp *ListVolumesResponseContent) *ListVolumesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Volumes. +// +// Gets an array of volumes for the current metastore under the parent catalog +// and schema. +// +// The returned volumes are filtered based on the privileges of the calling +// user. For example, the metastore admin is able to list all the volumes. A +// regular user needs to be the owner or have the **READ VOLUME** privilege on +// the volume to receive the volumes in the response. For the latter case, the +// caller must also be the owner or have the **USE_CATALOG** privilege on the +// parent catalog and the **USE_SCHEMA** privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the array.
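+//
+// Illustrative sketch (not generator output); "api" stands for a configured
+// volumesPreviewImpl, catalog and schema fields are assumed from the stable
+// catalog API, and the values are placeholders:
+//
+//	vols, err := api.ListAll(ctx, ListVolumesRequest{
+//		CatalogName: "main",
+//		SchemaName:  "default",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, v := range vols {
+//		fmt.Println(v.Name) // Name assumed from the stable VolumeInfo
+//	}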
+func (a *volumesPreviewImpl) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) {
+	iterator := a.List(ctx, request)
+	return listing.ToSlice[VolumeInfo](ctx, iterator)
+}
+func (a *volumesPreviewImpl) internalList(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) {
+	var listVolumesResponseContent ListVolumesResponseContent
+	path := "/api/2.1preview/unity-catalog/volumes"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listVolumesResponseContent)
+	return &listVolumesResponseContent, err
+}
+
+func (a *volumesPreviewImpl) Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error) {
+	var volumeInfo VolumeInfo
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &volumeInfo)
+	return &volumeInfo, err
+}
+
+func (a *volumesPreviewImpl) Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error) {
+	var volumeInfo VolumeInfo
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &volumeInfo)
+	return &volumeInfo, err
+}
+
+// unexported type that holds implementations of just WorkspaceBindingsPreview API methods
+type workspaceBindingsPreviewImpl struct {
+	client *client.DatabricksClient
+}
+
+func (a *workspaceBindingsPreviewImpl) Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error) {
+	var currentWorkspaceBindings CurrentWorkspaceBindings
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspace-bindings/catalogs/%v", request.Name)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &currentWorkspaceBindings)
+	return &currentWorkspaceBindings, err
+}
+
+// Get securable workspace bindings.
+//
+// Gets workspace bindings of the securable. The caller must be a metastore
+// admin or an owner of the securable.
+func (a *workspaceBindingsPreviewImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] {
+
+	getNextPage := func(ctx context.Context, req GetBindingsRequest) (*WorkspaceBindingsResponse, error) {
+		ctx = useragent.InContext(ctx, "sdk-feature", "pagination")
+		return a.internalGetBindings(ctx, req)
+	}
+	getItems := func(resp *WorkspaceBindingsResponse) []WorkspaceBinding {
+		return resp.Bindings
+	}
+	getNextReq := func(resp *WorkspaceBindingsResponse) *GetBindingsRequest {
+		if resp.NextPageToken == "" {
+			return nil
+		}
+		request.PageToken = resp.NextPageToken
+		return &request
+	}
+	iterator := listing.NewIterator(
+		&request,
+		getNextPage,
+		getItems,
+		getNextReq)
+	return iterator
+}
+
+// Get securable workspace bindings.
+//
+// Gets workspace bindings of the securable. The caller must be a metastore
+// admin or an owner of the securable.
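+//
+// A hedged sketch of walking the paginated iterator directly; the "bindings"
+// handle and the securable values below are assumptions for illustration,
+// while HasNext/Next are the methods exposed by [listing.Iterator]:
+//
+//	it := bindings.GetBindings(ctx, GetBindingsRequest{
+//		SecurableType: "catalog",
+//		SecurableName: "main",
+//	})
+//	for it.HasNext(ctx) {
+//		b, err := it.Next(ctx)
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(b.WorkspaceId)
+//	}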
+func (a *workspaceBindingsPreviewImpl) GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) {
+	iterator := a.GetBindings(ctx, request)
+	return listing.ToSlice[WorkspaceBinding](ctx, iterator)
+}
+func (a *workspaceBindingsPreviewImpl) internalGetBindings(ctx context.Context, request GetBindingsRequest) (*WorkspaceBindingsResponse, error) {
+	var workspaceBindingsResponse WorkspaceBindingsResponse
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceBindingsResponse)
+	return &workspaceBindingsResponse, err
+}
+
+func (a *workspaceBindingsPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error) {
+	var currentWorkspaceBindings CurrentWorkspaceBindings
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspace-bindings/catalogs/%v", request.Name)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &currentWorkspaceBindings)
+	return &currentWorkspaceBindings, err
+}
+
+func (a *workspaceBindingsPreviewImpl) UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error) {
+	var workspaceBindingsResponse WorkspaceBindingsResponse
+	path := fmt.Sprintf("/api/2.1preview/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &workspaceBindingsResponse)
+	return &workspaceBindingsResponse, err
+}
diff --git a/catalog/v2preview/model.go b/catalog/v2preview/model.go
new file mode 100755
index 000000000..8be9f201f
--- /dev/null
+++ b/catalog/v2preview/model.go
@@ -0,0 +1,6163 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package catalogpreview
+
+import (
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/databricks/marshal"
+)
+
+type AccountsCreateMetastore struct {
+	MetastoreInfo *CreateMetastore `json:"metastore_info,omitempty"`
+}
+
+type AccountsCreateMetastoreAssignment struct {
+	MetastoreAssignment *CreateMetastoreAssignment `json:"metastore_assignment,omitempty"`
+	// Unity Catalog metastore ID
+	MetastoreId string `json:"-" url:"-"`
+	// Workspace ID.
+ WorkspaceId int64 `json:"-" url:"-"` +} + +type AccountsCreateStorageCredential struct { + CredentialInfo *CreateStorageCredential `json:"credential_info,omitempty"` + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` +} + +type AccountsMetastoreAssignment struct { + MetastoreAssignment *MetastoreAssignment `json:"metastore_assignment,omitempty"` +} + +type AccountsMetastoreInfo struct { + MetastoreInfo *MetastoreInfo `json:"metastore_info,omitempty"` +} + +type AccountsStorageCredentialInfo struct { + CredentialInfo *StorageCredentialInfo `json:"credential_info,omitempty"` +} + +type AccountsUpdateMetastore struct { + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + + MetastoreInfo *UpdateMetastore `json:"metastore_info,omitempty"` +} + +type AccountsUpdateMetastoreAssignment struct { + MetastoreAssignment *UpdateMetastoreAssignment `json:"metastore_assignment,omitempty"` + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + // Workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +type AccountsUpdateStorageCredential struct { + CredentialInfo *UpdateStorageCredential `json:"credential_info,omitempty"` + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + // Name of the storage credential. + StorageCredentialName string `json:"-" url:"-"` +} + +type ArtifactAllowlistInfo struct { + // A list of allowed artifact match patterns. + ArtifactMatchers []ArtifactMatcher `json:"artifact_matchers,omitempty"` + // Time at which this artifact allowlist was set, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of the user who set the artifact allowlist. + CreatedBy string `json:"created_by,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ArtifactAllowlistInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ArtifactAllowlistInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ArtifactMatcher struct { + // The artifact path or maven coordinate + Artifact string `json:"artifact"` + // The pattern matching type of the artifact + MatchType MatchType `json:"match_type"` +} + +// The artifact type +type ArtifactType string + +const ArtifactTypeInitScript ArtifactType = `INIT_SCRIPT` + +const ArtifactTypeLibraryJar ArtifactType = `LIBRARY_JAR` + +const ArtifactTypeLibraryMaven ArtifactType = `LIBRARY_MAVEN` + +// String representation for [fmt.Print] +func (f *ArtifactType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ArtifactType) Set(v string) error { + switch v { + case `INIT_SCRIPT`, `LIBRARY_JAR`, `LIBRARY_MAVEN`: + *f = ArtifactType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INIT_SCRIPT", "LIBRARY_JAR", "LIBRARY_MAVEN"`, v) + } +} + +// Type always returns ArtifactType to satisfy [pflag.Value] interface +func (f *ArtifactType) Type() string { + return "ArtifactType" +} + +type AssignResponse struct { +} + +// AWS temporary credentials for API authentication. Read more at +// https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. +type AwsCredentials struct { + // The access key ID that identifies the temporary credentials. 
+	AccessKeyId string `json:"access_key_id,omitempty"`
+	// The Amazon Resource Name (ARN) of the S3 access point for temporary
+	// credentials related to the external location.
+	AccessPoint string `json:"access_point,omitempty"`
+	// The secret access key that can be used to sign AWS API requests.
+	SecretAccessKey string `json:"secret_access_key,omitempty"`
+	// The token that users must pass to AWS API to use the temporary
+	// credentials.
+	SessionToken string `json:"session_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AwsCredentials) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AwsCredentials) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// The AWS IAM role configuration
+type AwsIamRole struct {
+	// The external ID used in role assumption to prevent the confused deputy
+	// problem.
+	ExternalId string `json:"external_id,omitempty"`
+	// The Amazon Resource Name (ARN) of the AWS IAM role used to vend temporary
+	// credentials.
+	RoleArn string `json:"role_arn,omitempty"`
+	// The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks.
+	// This is the identity that is going to assume the AWS IAM role.
+	UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AwsIamRole) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AwsIamRole) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AwsIamRoleRequest struct {
+	// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access.
+	RoleArn string `json:"role_arn"`
+}
+
+type AwsIamRoleResponse struct {
+	// The external ID used in role assumption to prevent the confused deputy
+	// problem.
+	ExternalId string `json:"external_id,omitempty"`
+	// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access.
+	RoleArn string `json:"role_arn"`
+	// The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks.
+	// This is the identity that is going to assume the AWS IAM role.
+	UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AwsIamRoleResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AwsIamRoleResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Azure Active Directory token, essentially the OAuth token for Azure Service
+// Principal or Managed Identity. Read more at
+// https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token
+type AzureActiveDirectoryToken struct {
+	// Opaque token that contains claims that you can use in Azure Active
+	// Directory to access cloud services.
+	AadToken string `json:"aad_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AzureActiveDirectoryToken) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AzureActiveDirectoryToken) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// The Azure managed identity configuration.
+type AzureManagedIdentity struct {
+	// The Azure resource ID of the Azure Databricks Access Connector. Use the
+	// format
+	// `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}`.
+	AccessConnectorId string `json:"access_connector_id"`
+	// The Databricks internal ID that represents this managed identity. This
+	// field is only used to persist the credential_id once it is fetched from
+	// the credentials manager - as we only use the protobuf serializer to store
+	// credentials, this ID gets persisted to the database.
+	CredentialId string `json:"credential_id,omitempty"`
+	// The Azure resource ID of the managed identity. Use the format
+	// `/subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}`.
+	// This is only available for user-assigned identities. For system-assigned
+	// identities, the access_connector_id is used to identify the identity. If
+	// this field is not provided, then we assume the AzureManagedIdentity is
+	// using the system-assigned identity.
+	ManagedIdentityId string `json:"managed_identity_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AzureManagedIdentity) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AzureManagedIdentity) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AzureManagedIdentityRequest struct {
+	// The Azure resource ID of the Azure Databricks Access Connector. Use the
+	// format
+	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}.
+	AccessConnectorId string `json:"access_connector_id"`
+	// The Azure resource ID of the managed identity. Use the format
+	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}.
+	// This is only available for user-assigned identities. For system-assigned
+	// identities, the access_connector_id is used to identify the identity. If
+	// this field is not provided, then we assume the AzureManagedIdentity is
+	// for a system-assigned identity.
+	ManagedIdentityId string `json:"managed_identity_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AzureManagedIdentityRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AzureManagedIdentityRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AzureManagedIdentityResponse struct {
+	// The Azure resource ID of the Azure Databricks Access Connector. Use the
+	// format
+	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}.
+	AccessConnectorId string `json:"access_connector_id"`
+	// The Databricks internal ID that represents this managed identity.
+	CredentialId string `json:"credential_id,omitempty"`
+	// The Azure resource ID of the managed identity. Use the format
+	// /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}.
+	// This is only available for user-assigned identities. For system-assigned
+	// identities, the access_connector_id is used to identify the identity. If
+	// this field is not provided, then we assume the AzureManagedIdentity is
+	// for a system-assigned identity.
+	ManagedIdentityId string `json:"managed_identity_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AzureManagedIdentityResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AzureManagedIdentityResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// The Azure service principal configuration. Only applicable when purpose is
+// **STORAGE**.
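+//
+// For illustration only (placeholder GUIDs and secret), a literal of this type
+// as it would appear inside a credential request:
+//
+//	sp := AzureServicePrincipal{
+//		DirectoryId:   "00000000-0000-0000-0000-000000000000",
+//		ApplicationId: "11111111-1111-1111-1111-111111111111",
+//		ClientSecret:  "<client-secret>",
+//	}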
+type AzureServicePrincipal struct { + // The application ID of the application registration within the referenced + // AAD tenant. + ApplicationId string `json:"application_id"` + // The client secret generated for the above app ID in AAD. + ClientSecret string `json:"client_secret"` + // The directory ID corresponding to the Azure Active Directory (AAD) tenant + // of the application. + DirectoryId string `json:"directory_id"` +} + +// Azure temporary credentials for API authentication. Read more at +// https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas +type AzureUserDelegationSas struct { + // The signed URI (SAS Token) used to access blob services for a given path + SasToken string `json:"sas_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureUserDelegationSas) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureUserDelegationSas) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Cancel refresh +type CancelRefreshRequest struct { + // ID of the refresh. + RefreshId string `json:"-" url:"-"` + // Full name of the table. + TableName string `json:"-" url:"-"` +} + +type CancelRefreshResponse struct { +} + +type CatalogInfo struct { + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly bool `json:"browse_only,omitempty"` + // The type of the catalog. + CatalogType CatalogType `json:"catalog_type,omitempty"` + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // The name of the connection to an external data source. + ConnectionName string `json:"connection_name,omitempty"` + // Time at which this catalog was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of catalog creator. + CreatedBy string `json:"created_by,omitempty"` + + EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `json:"enable_predictive_optimization,omitempty"` + // The full name of the catalog. Corresponds with the name field. + FullName string `json:"full_name,omitempty"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode CatalogIsolationMode `json:"isolation_mode,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // Name of catalog. + Name string `json:"name,omitempty"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options,omitempty"` + // Username of current owner of catalog. + Owner string `json:"owner,omitempty"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + // The name of delta sharing provider. + // + // A Delta Sharing catalog is a catalog that is based on a Delta share on a + // remote sharing server. + ProviderName string `json:"provider_name,omitempty"` + // Status of an asynchronously provisioned resource. 
+ ProvisioningInfo *ProvisioningInfo `json:"provisioning_info,omitempty"` + + SecurableType string `json:"securable_type,omitempty"` + // The name of the share under the share provider. + ShareName string `json:"share_name,omitempty"` + // Storage Location URL (full path) for managed tables within catalog. + StorageLocation string `json:"storage_location,omitempty"` + // Storage root URL for managed tables within catalog. + StorageRoot string `json:"storage_root,omitempty"` + // Time at which this catalog was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified catalog. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CatalogInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CatalogInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Whether the current securable is accessible from all workspaces or a specific +// set of workspaces. +type CatalogIsolationMode string + +const CatalogIsolationModeIsolated CatalogIsolationMode = `ISOLATED` + +const CatalogIsolationModeOpen CatalogIsolationMode = `OPEN` + +// String representation for [fmt.Print] +func (f *CatalogIsolationMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CatalogIsolationMode) Set(v string) error { + switch v { + case `ISOLATED`, `OPEN`: + *f = CatalogIsolationMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ISOLATED", "OPEN"`, v) + } +} + +// Type always returns CatalogIsolationMode to satisfy [pflag.Value] interface +func (f *CatalogIsolationMode) Type() string { + return "CatalogIsolationMode" +} + +// The type of the catalog. +type CatalogType string + +const CatalogTypeDeltasharingCatalog CatalogType = `DELTASHARING_CATALOG` + +const CatalogTypeManagedCatalog CatalogType = `MANAGED_CATALOG` + +const CatalogTypeSystemCatalog CatalogType = `SYSTEM_CATALOG` + +// String representation for [fmt.Print] +func (f *CatalogType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CatalogType) Set(v string) error { + switch v { + case `DELTASHARING_CATALOG`, `MANAGED_CATALOG`, `SYSTEM_CATALOG`: + *f = CatalogType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTASHARING_CATALOG", "MANAGED_CATALOG", "SYSTEM_CATALOG"`, v) + } +} + +// Type always returns CatalogType to satisfy [pflag.Value] interface +func (f *CatalogType) Type() string { + return "CatalogType" +} + +type CloudflareApiToken struct { + // The Cloudflare access key id of the token. + AccessKeyId string `json:"access_key_id"` + // The account id associated with the API token. + AccountId string `json:"account_id"` + // The secret access token generated for the access key id + SecretAccessKey string `json:"secret_access_key"` +} + +type ColumnInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + + Mask *ColumnMask `json:"mask,omitempty"` + // Name of Column. + Name string `json:"name,omitempty"` + // Whether field may be Null (default: true). + Nullable bool `json:"nullable,omitempty"` + // Partition index for column. + PartitionIndex int `json:"partition_index,omitempty"` + // Ordinal position of column (starting at position 0). + Position int `json:"position,omitempty"` + // Format of IntervalType. 
+ TypeIntervalType string `json:"type_interval_type,omitempty"` + // Full data type specification, JSON-serialized. + TypeJson string `json:"type_json,omitempty"` + + TypeName ColumnTypeName `json:"type_name,omitempty"` + // Digits of precision; required for DecimalTypes. + TypePrecision int `json:"type_precision,omitempty"` + // Digits to right of decimal; Required for DecimalTypes. + TypeScale int `json:"type_scale,omitempty"` + // Full data type specification as SQL/catalogString text. + TypeText string `json:"type_text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnMask struct { + // The full name of the column mask SQL UDF. + FunctionName string `json:"function_name,omitempty"` + // The list of additional table columns to be passed as input to the column + // mask function. The first arg of the mask function should be of the type + // of the column being masked and the types of the rest of the args should + // match the types of columns in 'using_column_names'. + UsingColumnNames []string `json:"using_column_names,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnMask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnMask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +const ColumnTypeNameVariant ColumnTypeName = `VARIANT` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) 
+ } +} + +// Type always returns ColumnTypeName to satisfy [pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + +type ConnectionInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Unique identifier of the Connection. + ConnectionId string `json:"connection_id,omitempty"` + // The type of connection. + ConnectionType ConnectionType `json:"connection_type,omitempty"` + // Time at which this connection was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of connection creator. + CreatedBy string `json:"created_by,omitempty"` + // The type of credential. + CredentialType CredentialType `json:"credential_type,omitempty"` + // Full name of connection. + FullName string `json:"full_name,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // Name of the connection. + Name string `json:"name,omitempty"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options,omitempty"` + // Username of current owner of the connection. + Owner string `json:"owner,omitempty"` + // An object containing map of key-value properties attached to the + // connection. + Properties map[string]string `json:"properties,omitempty"` + // Status of an asynchronously provisioned resource. + ProvisioningInfo *ProvisioningInfo `json:"provisioning_info,omitempty"` + // If the connection is read only. + ReadOnly bool `json:"read_only,omitempty"` + + SecurableType string `json:"securable_type,omitempty"` + // Time at which this connection was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified connection. + UpdatedBy string `json:"updated_by,omitempty"` + // URL of the remote data source, extracted from options. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ConnectionInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ConnectionInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of connection. 
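+//
+// The generated enum types in this file implement [pflag.Value]; a small
+// sketch of the validation behavior, using this type as the example
+// (illustrative only):
+//
+//	var ct ConnectionType
+//	_ = ct.Set("MYSQL")                    // accepted, ct now holds "MYSQL"
+//	err := ct.Set("NOT_A_CONNECTION_TYPE") // rejected; ct keeps its prior value
+//	fmt.Println(ct.String(), err != nil)   // prints "MYSQL true"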
+type ConnectionType string + +const ConnectionTypeBigquery ConnectionType = `BIGQUERY` + +const ConnectionTypeDatabricks ConnectionType = `DATABRICKS` + +const ConnectionTypeGlue ConnectionType = `GLUE` + +const ConnectionTypeHiveMetastore ConnectionType = `HIVE_METASTORE` + +const ConnectionTypeHttp ConnectionType = `HTTP` + +const ConnectionTypeMysql ConnectionType = `MYSQL` + +const ConnectionTypePostgresql ConnectionType = `POSTGRESQL` + +const ConnectionTypeRedshift ConnectionType = `REDSHIFT` + +const ConnectionTypeSnowflake ConnectionType = `SNOWFLAKE` + +const ConnectionTypeSqldw ConnectionType = `SQLDW` + +const ConnectionTypeSqlserver ConnectionType = `SQLSERVER` + +// String representation for [fmt.Print] +func (f *ConnectionType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ConnectionType) Set(v string) error { + switch v { + case `BIGQUERY`, `DATABRICKS`, `GLUE`, `HIVE_METASTORE`, `HTTP`, `MYSQL`, `POSTGRESQL`, `REDSHIFT`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`: + *f = ConnectionType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "GLUE", "HIVE_METASTORE", "HTTP", "MYSQL", "POSTGRESQL", "REDSHIFT", "SNOWFLAKE", "SQLDW", "SQLSERVER"`, v) + } +} + +// Type always returns ConnectionType to satisfy [pflag.Value] interface +func (f *ConnectionType) Type() string { + return "ConnectionType" +} + +// Detailed status of an online table. Shown if the online table is in the +// ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. +type ContinuousUpdateStatus struct { + // Progress of the initial data synchronization. + InitialPipelineSyncProgress *PipelineProgress `json:"initial_pipeline_sync_progress,omitempty"` + // The last source table Delta version that was synced to the online table. + // Note that this Delta version may not be completely synced to the online + // table yet. + LastProcessedCommitVersion int64 `json:"last_processed_commit_version,omitempty"` + // The timestamp of the last time any data was synchronized from the source + // table to the online table. + Timestamp string `json:"timestamp,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ContinuousUpdateStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ContinuousUpdateStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCatalog struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // The name of the connection to an external data source. + ConnectionName string `json:"connection_name,omitempty"` + // Name of catalog. + Name string `json:"name"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options,omitempty"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + // The name of delta sharing provider. + // + // A Delta Sharing catalog is a catalog that is based on a Delta share on a + // remote sharing server. + ProviderName string `json:"provider_name,omitempty"` + // The name of the share under the share provider. + ShareName string `json:"share_name,omitempty"` + // Storage root URL for managed tables within catalog. 
+	StorageRoot string `json:"storage_root,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateCatalog) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateCatalog) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateConnection struct {
+	// User-provided free-form text description.
+	Comment string `json:"comment,omitempty"`
+	// The type of connection.
+	ConnectionType ConnectionType `json:"connection_type"`
+	// Name of the connection.
+	Name string `json:"name"`
+	// A map of key-value properties attached to the securable.
+	Options map[string]string `json:"options"`
+	// An object containing map of key-value properties attached to the
+	// connection.
+	Properties map[string]string `json:"properties,omitempty"`
+	// If the connection is read only.
+	ReadOnly bool `json:"read_only,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateConnection) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateConnection) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateCredentialRequest struct {
+	// The AWS IAM role configuration
+	AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
+	// The Azure managed identity configuration.
+	AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
+	// The Azure service principal configuration. Only applicable when purpose
+	// is **STORAGE**.
+	AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
+	// Comment associated with the credential.
+	Comment string `json:"comment,omitempty"`
+	// GCP long-lived credential. Databricks-created Google Cloud Storage
+	// service account.
+	DatabricksGcpServiceAccount *DatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"`
+	// The credential name. The name must be unique among storage and service
+	// credentials within the metastore.
+	Name string `json:"name"`
+	// Indicates the purpose of the credential.
+	Purpose CredentialPurpose `json:"purpose,omitempty"`
+	// Whether the credential is usable only for read operations. Only
+	// applicable when purpose is **STORAGE**.
+	ReadOnly bool `json:"read_only,omitempty"`
+	// Optional. Supplying true to this argument skips validation of the created
+	// set of credentials.
+	SkipValidation bool `json:"skip_validation,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateCredentialRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateCredentialRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateExternalLocation struct {
+	// The AWS access point to use when accessing s3 for this external location.
+	AccessPoint string `json:"access_point,omitempty"`
+	// User-provided free-form text description.
+	Comment string `json:"comment,omitempty"`
+	// Name of the storage credential used with this location.
+	CredentialName string `json:"credential_name"`
+	// Encryption options that apply to clients connecting to cloud storage.
+	EncryptionDetails *EncryptionDetails `json:"encryption_details,omitempty"`
+	// Indicates whether fallback mode is enabled for this external location.
+	// When fallback mode is enabled, the access to the location falls back to
+	// cluster credentials if UC credentials are not sufficient.
+	Fallback bool `json:"fallback,omitempty"`
+	// Name of the external location.
+ Name string `json:"name"` + // Indicates whether the external location is read-only. + ReadOnly bool `json:"read_only,omitempty"` + // Skips validation of the storage credential associated with the external + // location. + SkipValidation bool `json:"skip_validation,omitempty"` + // Path URL of the external location. + Url string `json:"url"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateExternalLocation) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateExternalLocation) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateFunction struct { + // Name of parent catalog. + CatalogName string `json:"catalog_name"` + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Scalar function return data type. + DataType ColumnTypeName `json:"data_type"` + // External function language. + ExternalLanguage string `json:"external_language,omitempty"` + // External function name. + ExternalName string `json:"external_name,omitempty"` + // Pretty printed function data type. + FullDataType string `json:"full_data_type"` + + InputParams FunctionParameterInfos `json:"input_params"` + // Whether the function is deterministic. + IsDeterministic bool `json:"is_deterministic"` + // Function null call. + IsNullCall bool `json:"is_null_call"` + // Name of function, relative to parent schema. + Name string `json:"name"` + // Function parameter style. **S** is the value for SQL. + ParameterStyle CreateFunctionParameterStyle `json:"parameter_style"` + // JSON-serialized key-value pair map, encoded (escaped) as a string. + Properties string `json:"properties,omitempty"` + // Table function return parameters. + ReturnParams *FunctionParameterInfos `json:"return_params,omitempty"` + // Function language. When **EXTERNAL** is used, the language of the routine + // function should be specified in the __external_language__ field, and the + // __return_params__ of the function cannot be used (as **TABLE** return + // type is not supported), and the __sql_data_access__ field must be + // **NO_SQL**. + RoutineBody CreateFunctionRoutineBody `json:"routine_body"` + // Function body. + RoutineDefinition string `json:"routine_definition"` + // Function dependencies. + RoutineDependencies *DependencyList `json:"routine_dependencies,omitempty"` + // Name of parent schema relative to its parent catalog. + SchemaName string `json:"schema_name"` + // Function security type. + SecurityType CreateFunctionSecurityType `json:"security_type"` + // Specific name of the function; Reserved for future use. + SpecificName string `json:"specific_name"` + // Function SQL data access. + SqlDataAccess CreateFunctionSqlDataAccess `json:"sql_data_access"` + // List of schemes whose objects can be referenced without qualification. + SqlPath string `json:"sql_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateFunction) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateFunction) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Function parameter style. **S** is the value for SQL. 
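+//
+// A hedged sketch of a CreateFunction payload for a simple SQL function, using
+// the enum constants defined in this file (the names and values are
+// illustrative, and the FunctionParameterInfos contents are elided):
+//
+//	fn := CreateFunction{
+//		CatalogName:       "main",
+//		SchemaName:        "default",
+//		Name:              "add_one",
+//		DataType:          ColumnTypeNameInt,
+//		FullDataType:      "INT",
+//		InputParams:       FunctionParameterInfos{ /* one INT parameter */ },
+//		IsDeterministic:   true,
+//		IsNullCall:        false,
+//		ParameterStyle:    CreateFunctionParameterStyleS,
+//		RoutineBody:       CreateFunctionRoutineBodySql,
+//		RoutineDefinition: "RETURN x + 1",
+//		SecurityType:      CreateFunctionSecurityTypeDefiner,
+//		SpecificName:      "add_one",
+//		SqlDataAccess:     CreateFunctionSqlDataAccessContainsSql,
+//	}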
+type CreateFunctionParameterStyle string + +const CreateFunctionParameterStyleS CreateFunctionParameterStyle = `S` + +// String representation for [fmt.Print] +func (f *CreateFunctionParameterStyle) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionParameterStyle) Set(v string) error { + switch v { + case `S`: + *f = CreateFunctionParameterStyle(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "S"`, v) + } +} + +// Type always returns CreateFunctionParameterStyle to satisfy [pflag.Value] interface +func (f *CreateFunctionParameterStyle) Type() string { + return "CreateFunctionParameterStyle" +} + +type CreateFunctionRequest struct { + // Partial __FunctionInfo__ specifying the function to be created. + FunctionInfo CreateFunction `json:"function_info"` +} + +// Function language. When **EXTERNAL** is used, the language of the routine +// function should be specified in the __external_language__ field, and the +// __return_params__ of the function cannot be used (as **TABLE** return type is +// not supported), and the __sql_data_access__ field must be **NO_SQL**. +type CreateFunctionRoutineBody string + +const CreateFunctionRoutineBodyExternal CreateFunctionRoutineBody = `EXTERNAL` + +const CreateFunctionRoutineBodySql CreateFunctionRoutineBody = `SQL` + +// String representation for [fmt.Print] +func (f *CreateFunctionRoutineBody) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionRoutineBody) Set(v string) error { + switch v { + case `EXTERNAL`, `SQL`: + *f = CreateFunctionRoutineBody(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "SQL"`, v) + } +} + +// Type always returns CreateFunctionRoutineBody to satisfy [pflag.Value] interface +func (f *CreateFunctionRoutineBody) Type() string { + return "CreateFunctionRoutineBody" +} + +// The security type of the function. +type CreateFunctionSecurityType string + +const CreateFunctionSecurityTypeDefiner CreateFunctionSecurityType = `DEFINER` + +// String representation for [fmt.Print] +func (f *CreateFunctionSecurityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionSecurityType) Set(v string) error { + switch v { + case `DEFINER`: + *f = CreateFunctionSecurityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEFINER"`, v) + } +} + +// Type always returns CreateFunctionSecurityType to satisfy [pflag.Value] interface +func (f *CreateFunctionSecurityType) Type() string { + return "CreateFunctionSecurityType" +} + +// Function SQL data access. 
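+//
+// Per the __routine_body__ documentation on CreateFunction above, an
+// **EXTERNAL** routine must name its language and use **NO_SQL** data access
+// (sketch only; the language value is an assumption):
+//
+//	fn.RoutineBody = CreateFunctionRoutineBodyExternal
+//	fn.ExternalLanguage = "python"
+//	fn.SqlDataAccess = CreateFunctionSqlDataAccessNoSql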
+type CreateFunctionSqlDataAccess string
+
+const CreateFunctionSqlDataAccessContainsSql CreateFunctionSqlDataAccess = `CONTAINS_SQL`
+
+const CreateFunctionSqlDataAccessNoSql CreateFunctionSqlDataAccess = `NO_SQL`
+
+const CreateFunctionSqlDataAccessReadsSqlData CreateFunctionSqlDataAccess = `READS_SQL_DATA`
+
+// String representation for [fmt.Print]
+func (f *CreateFunctionSqlDataAccess) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *CreateFunctionSqlDataAccess) Set(v string) error {
+	switch v {
+	case `CONTAINS_SQL`, `NO_SQL`, `READS_SQL_DATA`:
+		*f = CreateFunctionSqlDataAccess(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "CONTAINS_SQL", "NO_SQL", "READS_SQL_DATA"`, v)
+	}
+}
+
+// Type always returns CreateFunctionSqlDataAccess to satisfy [pflag.Value] interface
+func (f *CreateFunctionSqlDataAccess) Type() string {
+	return "CreateFunctionSqlDataAccess"
+}
+
+type CreateMetastore struct {
+	// The user-specified name of the metastore.
+	Name string `json:"name"`
+	// Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).
+	// The field can be omitted in the __workspace-level__ __API__ but not in
+	// the __account-level__ __API__. If this field is omitted, the region of
+	// the workspace receiving the request will be used.
+	Region string `json:"region,omitempty"`
+	// The storage root URL for metastore
+	StorageRoot string `json:"storage_root,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateMetastore) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateMetastore) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateMetastoreAssignment struct {
+	// The name of the default catalog in the metastore. This field is
+	// deprecated. Please use "Default Namespace API" to configure the default
+	// catalog for a Databricks workspace.
+	DefaultCatalogName string `json:"default_catalog_name"`
+	// The unique ID of the metastore.
+	MetastoreId string `json:"metastore_id"`
+	// A workspace ID.
+	WorkspaceId int64 `json:"-" url:"-"`
+}
+
+type CreateMonitor struct {
+	// The directory to store monitoring assets (e.g. dashboard, metric tables).
+	AssetsDir string `json:"assets_dir"`
+	// Name of the baseline table from which drift metrics are computed.
+	// Columns in the monitored table should also be present in the baseline
+	// table.
+	BaselineTableName string `json:"baseline_table_name,omitempty"`
+	// Custom metrics to compute on the monitored table. These can be aggregate
+	// metrics, derived metrics (from already computed aggregate metrics), or
+	// drift metrics (comparing metrics across time windows).
+	CustomMetrics []MonitorMetric `json:"custom_metrics,omitempty"`
+	// The data classification config for the monitor.
+	DataClassificationConfig *MonitorDataClassificationConfig `json:"data_classification_config,omitempty"`
+	// Configuration for monitoring inference logs.
+	InferenceLog *MonitorInferenceLog `json:"inference_log,omitempty"`
+	// The notification settings for the monitor.
+	Notifications *MonitorNotifications `json:"notifications,omitempty"`
+	// Schema where output metric tables are created.
+	OutputSchemaName string `json:"output_schema_name"`
+	// The schedule for automatically updating and refreshing metric tables.
+	Schedule *MonitorCronSchedule `json:"schedule,omitempty"`
+	// Whether to skip creating a default dashboard summarizing data quality
+	// metrics.
+ SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []string `json:"slicing_exprs,omitempty"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `json:"snapshot,omitempty"` + // Full name of the table. + TableName string `json:"-" url:"-"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `json:"time_series,omitempty"` + // Optional argument to specify the warehouse for dashboard creation. If not + // specified, the first running warehouse will be used. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateMonitor) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateMonitor) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Create an Online Table +type CreateOnlineTableRequest struct { + // Online Table information. + Table *OnlineTable `json:"table,omitempty"` +} + +type CreateRegisteredModelRequest struct { + // The name of the catalog where the schema and the registered model reside + CatalogName string `json:"catalog_name"` + // The comment attached to the registered model + Comment string `json:"comment,omitempty"` + // The name of the registered model + Name string `json:"name"` + // The name of the schema where the registered model resides + SchemaName string `json:"schema_name"` + // The storage location on the cloud under which model version data files + // are stored + StorageLocation string `json:"storage_location,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateRegisteredModelRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateRegisteredModelRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateResponse struct { +} + +type CreateSchema struct { + // Name of parent catalog. + CatalogName string `json:"catalog_name"` + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Name of schema, relative to parent catalog. + Name string `json:"name"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + // Storage root URL for managed tables within schema. + StorageRoot string `json:"storage_root,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateSchema) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateSchema) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateStorageCredential struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleRequest `json:"aws_iam_role,omitempty"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityRequest `json:"azure_managed_identity,omitempty"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `json:"cloudflare_api_token,omitempty"` + // Comment associated with the credential. 
+ Comment string `json:"comment,omitempty"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `json:"databricks_gcp_service_account,omitempty"` + // The credential name. The name must be unique within the metastore. + Name string `json:"name"` + // Whether the storage credential is only usable for read operations. + ReadOnly bool `json:"read_only,omitempty"` + // Supplying true to this argument skips validation of the created + // credential. + SkipValidation bool `json:"skip_validation,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateStorageCredential) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateStorageCredential) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateTableConstraint struct { + // A table constraint, as defined by *one* of the following fields being + // set: __primary_key_constraint__, __foreign_key_constraint__, + // __named_table_constraint__. + Constraint TableConstraint `json:"constraint"` + // The full name of the table referenced by the constraint. + FullNameArg string `json:"full_name_arg"` +} + +type CreateVolumeRequestContent struct { + // The name of the catalog where the schema and the volume are + CatalogName string `json:"catalog_name"` + // The comment attached to the volume + Comment string `json:"comment,omitempty"` + // The name of the volume + Name string `json:"name"` + // The name of the schema where the volume is + SchemaName string `json:"schema_name"` + // The storage location on the cloud + StorageLocation string `json:"storage_location,omitempty"` + + VolumeType VolumeType `json:"volume_type"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateVolumeRequestContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateVolumeRequestContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CredentialInfo struct { + // The AWS IAM role configuration + AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"` + // The Azure service principal configuration. Only applicable when purpose + // is **STORAGE**. + AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"` + // Comment associated with the credential. + Comment string `json:"comment,omitempty"` + // Time at which this credential was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of credential creator. + CreatedBy string `json:"created_by,omitempty"` + // GCP long-lived credential. Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` + // The full name of the credential. + FullName string `json:"full_name,omitempty"` + // The unique identifier of the credential. + Id string `json:"id,omitempty"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode IsolationMode `json:"isolation_mode,omitempty"` + // Unique identifier of the parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // The credential name. The name must be unique among storage and service + // credentials within the metastore. 
+	Name string `json:"name,omitempty"`
+	// Username of current owner of credential.
+	Owner string `json:"owner,omitempty"`
+	// Indicates the purpose of the credential.
+	Purpose CredentialPurpose `json:"purpose,omitempty"`
+	// Whether the credential is usable only for read operations. Only
+	// applicable when purpose is **STORAGE**.
+	ReadOnly bool `json:"read_only,omitempty"`
+	// Time at which this credential was last modified, in epoch milliseconds.
+	UpdatedAt int64 `json:"updated_at,omitempty"`
+	// Username of user who last modified the credential.
+	UpdatedBy string `json:"updated_by,omitempty"`
+	// Whether this credential is the current metastore's root storage
+	// credential. Only applicable when purpose is **STORAGE**.
+	UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CredentialInfo) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CredentialInfo) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CredentialPurpose string
+
+const CredentialPurposeService CredentialPurpose = `SERVICE`
+
+const CredentialPurposeStorage CredentialPurpose = `STORAGE`
+
+// String representation for [fmt.Print]
+func (f *CredentialPurpose) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *CredentialPurpose) Set(v string) error {
+	switch v {
+	case `SERVICE`, `STORAGE`:
+		*f = CredentialPurpose(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "SERVICE", "STORAGE"`, v)
+	}
+}
+
+// Type always returns CredentialPurpose to satisfy [pflag.Value] interface
+func (f *CredentialPurpose) Type() string {
+	return "CredentialPurpose"
+}
+
+// The type of credential.
+type CredentialType string
+
+const CredentialTypeBearerToken CredentialType = `BEARER_TOKEN`
+
+const CredentialTypeUsernamePassword CredentialType = `USERNAME_PASSWORD`
+
+// String representation for [fmt.Print]
+func (f *CredentialType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *CredentialType) Set(v string) error {
+	switch v {
+	case `BEARER_TOKEN`, `USERNAME_PASSWORD`:
+		*f = CredentialType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "BEARER_TOKEN", "USERNAME_PASSWORD"`, v)
+	}
+}
+
+// Type always returns CredentialType to satisfy [pflag.Value] interface
+func (f *CredentialType) Type() string {
+	return "CredentialType"
+}
+
+type CredentialValidationResult struct {
+	// Error message, populated when the result is not **PASS**.
+	Message string `json:"message,omitempty"`
+	// The results of the tested operation.
+	Result ValidateCredentialResult `json:"result,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CredentialValidationResult) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CredentialValidationResult) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Currently assigned workspaces
+type CurrentWorkspaceBindings struct {
+	// A list of workspace IDs.
+ Workspaces []int64 `json:"workspaces,omitempty"` +} + +// Data source format +type DataSourceFormat string + +const DataSourceFormatAvro DataSourceFormat = `AVRO` + +const DataSourceFormatBigqueryFormat DataSourceFormat = `BIGQUERY_FORMAT` + +const DataSourceFormatCsv DataSourceFormat = `CSV` + +const DataSourceFormatDatabricksFormat DataSourceFormat = `DATABRICKS_FORMAT` + +const DataSourceFormatDelta DataSourceFormat = `DELTA` + +const DataSourceFormatDeltasharing DataSourceFormat = `DELTASHARING` + +const DataSourceFormatHiveCustom DataSourceFormat = `HIVE_CUSTOM` + +const DataSourceFormatHiveSerde DataSourceFormat = `HIVE_SERDE` + +const DataSourceFormatJson DataSourceFormat = `JSON` + +const DataSourceFormatMysqlFormat DataSourceFormat = `MYSQL_FORMAT` + +const DataSourceFormatNetsuiteFormat DataSourceFormat = `NETSUITE_FORMAT` + +const DataSourceFormatOrc DataSourceFormat = `ORC` + +const DataSourceFormatParquet DataSourceFormat = `PARQUET` + +const DataSourceFormatPostgresqlFormat DataSourceFormat = `POSTGRESQL_FORMAT` + +const DataSourceFormatRedshiftFormat DataSourceFormat = `REDSHIFT_FORMAT` + +const DataSourceFormatSalesforceFormat DataSourceFormat = `SALESFORCE_FORMAT` + +const DataSourceFormatSnowflakeFormat DataSourceFormat = `SNOWFLAKE_FORMAT` + +const DataSourceFormatSqldwFormat DataSourceFormat = `SQLDW_FORMAT` + +const DataSourceFormatSqlserverFormat DataSourceFormat = `SQLSERVER_FORMAT` + +const DataSourceFormatText DataSourceFormat = `TEXT` + +const DataSourceFormatUnityCatalog DataSourceFormat = `UNITY_CATALOG` + +const DataSourceFormatVectorIndexFormat DataSourceFormat = `VECTOR_INDEX_FORMAT` + +const DataSourceFormatWorkdayRaasFormat DataSourceFormat = `WORKDAY_RAAS_FORMAT` + +// String representation for [fmt.Print] +func (f *DataSourceFormat) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataSourceFormat) Set(v string) error { + switch v { + case `AVRO`, `BIGQUERY_FORMAT`, `CSV`, `DATABRICKS_FORMAT`, `DELTA`, `DELTASHARING`, `HIVE_CUSTOM`, `HIVE_SERDE`, `JSON`, `MYSQL_FORMAT`, `NETSUITE_FORMAT`, `ORC`, `PARQUET`, `POSTGRESQL_FORMAT`, `REDSHIFT_FORMAT`, `SALESFORCE_FORMAT`, `SNOWFLAKE_FORMAT`, `SQLDW_FORMAT`, `SQLSERVER_FORMAT`, `TEXT`, `UNITY_CATALOG`, `VECTOR_INDEX_FORMAT`, `WORKDAY_RAAS_FORMAT`: + *f = DataSourceFormat(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AVRO", "BIGQUERY_FORMAT", "CSV", "DATABRICKS_FORMAT", "DELTA", "DELTASHARING", "HIVE_CUSTOM", "HIVE_SERDE", "JSON", "MYSQL_FORMAT", "NETSUITE_FORMAT", "ORC", "PARQUET", "POSTGRESQL_FORMAT", "REDSHIFT_FORMAT", "SALESFORCE_FORMAT", "SNOWFLAKE_FORMAT", "SQLDW_FORMAT", "SQLSERVER_FORMAT", "TEXT", "UNITY_CATALOG", "VECTOR_INDEX_FORMAT", "WORKDAY_RAAS_FORMAT"`, v) + } +} + +// Type always returns DataSourceFormat to satisfy [pflag.Value] interface +func (f *DataSourceFormat) Type() string { + return "DataSourceFormat" +} + +// GCP long-lived credential. Databricks-created Google Cloud Storage service +// account. +type DatabricksGcpServiceAccount struct { + // The Databricks internal ID that represents this managed identity. This + // field is only used to persist the credential_id once it is fetched from + // the credentials manager - as we only use the protobuf serializer to store + // credentials, this ID gets persisted to the database + CredentialId string `json:"credential_id,omitempty"` + // The email of the service account. 
+ Email string `json:"email,omitempty"` + // The ID that represents the private key for this Service Account + PrivateKeyId string `json:"private_key_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DatabricksGcpServiceAccount) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DatabricksGcpServiceAccount) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DatabricksGcpServiceAccountRequest struct { +} + +type DatabricksGcpServiceAccountResponse struct { + // The Databricks internal ID that represents this service account. This is + // an output-only field. + CredentialId string `json:"credential_id,omitempty"` + // The email of the service account. This is an output-only field. + Email string `json:"email,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DatabricksGcpServiceAccountResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DatabricksGcpServiceAccountResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a metastore assignment +type DeleteAccountMetastoreAssignmentRequest struct { + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + // Workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +// Delete a metastore +type DeleteAccountMetastoreRequest struct { + // Force deletion even if the metastore is not empty. Default is false. + Force bool `json:"-" url:"force,omitempty"` + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAccountMetastoreRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAccountMetastoreRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a storage credential +type DeleteAccountStorageCredentialRequest struct { + // Force deletion even if the Storage Credential is not empty. Default is + // false. + Force bool `json:"-" url:"force,omitempty"` + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + // Name of the storage credential. + StorageCredentialName string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAccountStorageCredentialRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAccountStorageCredentialRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a Registered Model Alias +type DeleteAliasRequest struct { + // The name of the alias + Alias string `json:"-" url:"-"` + // The three-level (fully qualified) name of the registered model + FullName string `json:"-" url:"-"` +} + +type DeleteAliasResponse struct { +} + +// Delete a catalog +type DeleteCatalogRequest struct { + // Force deletion even if the catalog is not empty. + Force bool `json:"-" url:"force,omitempty"` + // The name of the catalog. + Name string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteCatalogRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteCatalogRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a connection +type DeleteConnectionRequest struct { + // The name of the connection to be deleted. 
+ Name string `json:"-" url:"-"`
+}
+
+// Delete a credential
+type DeleteCredentialRequest struct {
+ // Force deletion even if there are dependent services (when purpose is
+ // **SERVICE**) or dependent external locations and external tables (when
+ // purpose is **STORAGE**).
+ Force bool `json:"-" url:"force,omitempty"`
+ // Name of the credential.
+ NameArg string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteCredentialRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteCredentialRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type DeleteCredentialResponse struct {
+}
+
+// Delete an external location
+type DeleteExternalLocationRequest struct {
+ // Force deletion even if there are dependent external tables or mounts.
+ Force bool `json:"-" url:"force,omitempty"`
+ // Name of the external location.
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteExternalLocationRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteExternalLocationRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Delete a function
+type DeleteFunctionRequest struct {
+ // Force deletion even if the function is not empty.
+ Force bool `json:"-" url:"force,omitempty"`
+ // The fully-qualified name of the function (of the form
+ // __catalog_name__.__schema_name__.__function_name__).
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteFunctionRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteFunctionRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Delete a metastore
+type DeleteMetastoreRequest struct {
+ // Force deletion even if the metastore is not empty. Default is false.
+ Force bool `json:"-" url:"force,omitempty"`
+ // Unique ID of the metastore.
+ Id string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteMetastoreRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteMetastoreRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Delete a Model Version
+type DeleteModelVersionRequest struct {
+ // The three-level (fully qualified) name of the model version
+ FullName string `json:"-" url:"-"`
+ // The integer version number of the model version
+ Version int `json:"-" url:"-"`
+}
+
+// Delete an Online Table
+type DeleteOnlineTableRequest struct {
+ // Full three-part (catalog, schema, table) name of the table.
+ Name string `json:"-" url:"-"`
+}
+
+// Delete a table monitor
+type DeleteQualityMonitorRequest struct {
+ // Full name of the table.
+ TableName string `json:"-" url:"-"`
+}
+
+// Delete a Registered Model
+type DeleteRegisteredModelRequest struct {
+ // The three-level (fully qualified) name of the registered model
+ FullName string `json:"-" url:"-"`
+}
+
+type DeleteResponse struct {
+}
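+
+// The generated MarshalJSON drops zero-valued fields tagged omitempty unless
+// they are listed in ForceSendFields. A minimal sketch of the intended
+// pattern, assuming the marshal helper behaves as in the existing v2
+// packages (the literal below is illustrative):
+//
+//    info := CredentialInfo{
+//        Name:     "my-cred",
+//        ReadOnly: false,
+//        // Without this, "read_only" is omitted entirely; with it, the
+//        // payload carries an explicit "read_only": false.
+//        ForceSendFields: []string{"ReadOnly"},
+//    }
+//    b, err := json.Marshal(info) // dispatches to CredentialInfo.MarshalJSON
+
+// Delete a schema
+type DeleteSchemaRequest struct {
+ // Force deletion even if the schema is not empty.
+ Force bool `json:"-" url:"force,omitempty"`
+ // Full name of the schema.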
+ FullName string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteSchemaRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteSchemaRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a credential +type DeleteStorageCredentialRequest struct { + // Force deletion even if there are dependent external locations or external + // tables. + Force bool `json:"-" url:"force,omitempty"` + // Name of the storage credential. + Name string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteStorageCredentialRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteStorageCredentialRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a table constraint +type DeleteTableConstraintRequest struct { + // If true, try deleting all child constraints of the current constraint. If + // false, reject this operation if the current constraint has any child + // constraints. + Cascade bool `json:"-" url:"cascade"` + // The name of the constraint to delete. + ConstraintName string `json:"-" url:"constraint_name"` + // Full name of the table referenced by the constraint. + FullName string `json:"-" url:"-"` +} + +// Delete a table +type DeleteTableRequest struct { + // Full name of the table. + FullName string `json:"-" url:"-"` +} + +// Delete a Volume +type DeleteVolumeRequest struct { + // The three-level (fully qualified) name of the volume + Name string `json:"-" url:"-"` +} + +// Properties pertaining to the current state of the delta table as given by the +// commit server. This does not contain **delta.*** (input) properties in +// __TableInfo.properties__. +type DeltaRuntimePropertiesKvPairs struct { + // A map of key-value properties attached to the securable. + DeltaRuntimeProperties map[string]string `json:"delta_runtime_properties"` +} + +// A dependency of a SQL object. Either the __table__ field or the __function__ +// field must be defined. +type Dependency struct { + // A function that is dependent on a SQL object. + Function *FunctionDependency `json:"function,omitempty"` + // A table that is dependent on a SQL object. + Table *TableDependency `json:"table,omitempty"` +} + +// A list of dependencies. +type DependencyList struct { + // Array of dependencies. + Dependencies []Dependency `json:"dependencies,omitempty"` +} + +// Disable a system schema +type DisableRequest struct { + // The metastore ID under which the system schema lives. + MetastoreId string `json:"-" url:"-"` + // Full name of the system schema. + SchemaName string `json:"-" url:"-"` +} + +type DisableResponse struct { +} + +type EffectivePermissionsList struct { + // The privileges conveyed to each principal (either directly or via + // inheritance) + PrivilegeAssignments []EffectivePrivilegeAssignment `json:"privilege_assignments,omitempty"` +} + +type EffectivePredictiveOptimizationFlag struct { + // The name of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromName string `json:"inherited_from_name,omitempty"` + // The type of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromType EffectivePredictiveOptimizationFlagInheritedFromType `json:"inherited_from_type,omitempty"` + // Whether predictive optimization should be enabled for this object and + // objects under it. 
+ Value EnablePredictiveOptimization `json:"value"` + + ForceSendFields []string `json:"-"` +} + +func (s *EffectivePredictiveOptimizationFlag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EffectivePredictiveOptimizationFlag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of the object from which the flag was inherited. If there was no +// inheritance, this field is left blank. +type EffectivePredictiveOptimizationFlagInheritedFromType string + +const EffectivePredictiveOptimizationFlagInheritedFromTypeCatalog EffectivePredictiveOptimizationFlagInheritedFromType = `CATALOG` + +const EffectivePredictiveOptimizationFlagInheritedFromTypeSchema EffectivePredictiveOptimizationFlagInheritedFromType = `SCHEMA` + +// String representation for [fmt.Print] +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) Set(v string) error { + switch v { + case `CATALOG`, `SCHEMA`: + *f = EffectivePredictiveOptimizationFlagInheritedFromType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CATALOG", "SCHEMA"`, v) + } +} + +// Type always returns EffectivePredictiveOptimizationFlagInheritedFromType to satisfy [pflag.Value] interface +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) Type() string { + return "EffectivePredictiveOptimizationFlagInheritedFromType" +} + +type EffectivePrivilege struct { + // The full name of the object that conveys this privilege via inheritance. + // This field is omitted when privilege is not inherited (it's assigned to + // the securable itself). + InheritedFromName string `json:"inherited_from_name,omitempty"` + // The type of the object that conveys this privilege via inheritance. This + // field is omitted when privilege is not inherited (it's assigned to the + // securable itself). + InheritedFromType SecurableType `json:"inherited_from_type,omitempty"` + // The privilege assigned to the principal. + Privilege Privilege `json:"privilege,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EffectivePrivilege) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EffectivePrivilege) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EffectivePrivilegeAssignment struct { + // The principal (user email address or group name). + Principal string `json:"principal,omitempty"` + // The privileges conveyed to the principal (either directly or via + // inheritance). + Privileges []EffectivePrivilege `json:"privileges,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EffectivePrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EffectivePrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Whether predictive optimization should be enabled for this object and objects +// under it. 
+type EnablePredictiveOptimization string
+
+const EnablePredictiveOptimizationDisable EnablePredictiveOptimization = `DISABLE`
+
+const EnablePredictiveOptimizationEnable EnablePredictiveOptimization = `ENABLE`
+
+const EnablePredictiveOptimizationInherit EnablePredictiveOptimization = `INHERIT`
+
+// String representation for [fmt.Print]
+func (f *EnablePredictiveOptimization) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *EnablePredictiveOptimization) Set(v string) error {
+ switch v {
+ case `DISABLE`, `ENABLE`, `INHERIT`:
+ *f = EnablePredictiveOptimization(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DISABLE", "ENABLE", "INHERIT"`, v)
+ }
+}
+
+// Type always returns EnablePredictiveOptimization to satisfy [pflag.Value] interface
+func (f *EnablePredictiveOptimization) Type() string {
+ return "EnablePredictiveOptimization"
+}
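+
+// The effective predictive optimization flag pairs the resolved value with
+// where it was inherited from. A short illustrative sketch (the literal and
+// printf wiring are examples, not SDK code):
+//
+//    flag := EffectivePredictiveOptimizationFlag{
+//        Value:             EnablePredictiveOptimizationEnable,
+//        InheritedFromType: EffectivePredictiveOptimizationFlagInheritedFromTypeCatalog,
+//        InheritedFromName: "main",
+//    }
+//    if flag.InheritedFromName != "" {
+//        fmt.Printf("%s (inherited from %s %s)\n",
+//            flag.Value, flag.InheritedFromType, flag.InheritedFromName)
+//    } else {
+//        fmt.Printf("%s (set directly on the object)\n", flag.Value)
+//    }
+
+// Enable a system schema
+type EnableRequest struct {
+ // The metastore ID under which the system schema lives.
+ MetastoreId string `json:"-" url:"-"`
+ // Full name of the system schema.
+ SchemaName string `json:"-" url:"-"`
+}
+
+type EnableResponse struct {
+}
+
+// Encryption options that apply to clients connecting to cloud storage.
+type EncryptionDetails struct {
+ // Server-Side Encryption properties for clients communicating with AWS S3.
+ SseEncryptionDetails *SseEncryptionDetails `json:"sse_encryption_details,omitempty"`
+}
+
+// Get a boolean reflecting whether the table exists
+type ExistsRequest struct {
+ // Full name of the table.
+ FullName string `json:"-" url:"-"`
+}
+
+type ExternalLocationInfo struct {
+ // The AWS access point to use when accessing S3 for this external location.
+ AccessPoint string `json:"access_point,omitempty"`
+ // Indicates whether the principal is limited to retrieving metadata for the
+ // associated object through the BROWSE privilege when include_browse is
+ // enabled in the request.
+ BrowseOnly bool `json:"browse_only,omitempty"`
+ // User-provided free-form text description.
+ Comment string `json:"comment,omitempty"`
+ // Time at which this external location was created, in epoch milliseconds.
+ CreatedAt int64 `json:"created_at,omitempty"`
+ // Username of external location creator.
+ CreatedBy string `json:"created_by,omitempty"`
+ // Unique ID of the location's storage credential.
+ CredentialId string `json:"credential_id,omitempty"`
+ // Name of the storage credential used with this location.
+ CredentialName string `json:"credential_name,omitempty"`
+ // Encryption options that apply to clients connecting to cloud storage.
+ EncryptionDetails *EncryptionDetails `json:"encryption_details,omitempty"`
+ // Indicates whether fallback mode is enabled for this external location.
+ // When fallback mode is enabled, access to the location falls back to
+ // cluster credentials if UC credentials are not sufficient.
+ Fallback bool `json:"fallback,omitempty"`
+
+ IsolationMode IsolationMode `json:"isolation_mode,omitempty"`
+ // Unique identifier of metastore hosting the external location.
+ MetastoreId string `json:"metastore_id,omitempty"`
+ // Name of the external location.
+ Name string `json:"name,omitempty"`
+ // The owner of the external location.
+ Owner string `json:"owner,omitempty"`
+ // Indicates whether the external location is read-only.
+ ReadOnly bool `json:"read_only,omitempty"`
+ // Time at which this external location was last modified, in epoch
+ // milliseconds.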
+ UpdatedAt int64 `json:"updated_at,omitempty"`
+ // Username of user who last modified the external location.
+ UpdatedBy string `json:"updated_by,omitempty"`
+ // Path URL of the external location.
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ExternalLocationInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ExternalLocationInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Detailed status of an online table. Shown if the online table is in the
+// OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state.
+type FailedStatus struct {
+ // The last source table Delta version that was synced to the online table.
+ // Note that this Delta version may only be partially synced to the online
+ // table. Only populated if the table is still online and available for
+ // serving.
+ LastProcessedCommitVersion int64 `json:"last_processed_commit_version,omitempty"`
+ // The timestamp of the last time any data was synchronized from the source
+ // table to the online table. Only populated if the table is still online
+ // and available for serving.
+ Timestamp string `json:"timestamp,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FailedStatus) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s FailedStatus) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ForeignKeyConstraint struct {
+ // Column names in the child (referencing) table.
+ ChildColumns []string `json:"child_columns"`
+ // The name of the constraint.
+ Name string `json:"name"`
+ // Column names in the parent (referenced) table.
+ ParentColumns []string `json:"parent_columns"`
+ // The full name of the parent table.
+ ParentTable string `json:"parent_table"`
+}
+
+// A function that is dependent on a SQL object.
+type FunctionDependency struct {
+ // Full name of the dependent function, in the form of
+ // __catalog_name__.__schema_name__.__function_name__.
+ FunctionFullName string `json:"function_full_name"`
+}
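+
+// Dependencies are modeled as a one-of: exactly one of Dependency.Function
+// or Dependency.Table should be set. A small illustrative sketch (the
+// three-level names are examples; TableDependency's TableFullName field is
+// assumed to mirror FunctionDependency and is defined elsewhere in this
+// file):
+//
+//    deps := DependencyList{
+//        Dependencies: []Dependency{
+//            {Function: &FunctionDependency{
+//                FunctionFullName: "main.metrics.revenue",
+//            }},
+//            {Table: &TableDependency{
+//                TableFullName: "main.sales.orders",
+//            }},
+//        },
+//    }
+
+type FunctionInfo struct {
+ // Indicates whether the principal is limited to retrieving metadata for the
+ // associated object through the BROWSE privilege when include_browse is
+ // enabled in the request.
+ BrowseOnly bool `json:"browse_only,omitempty"`
+ // Name of parent catalog.
+ CatalogName string `json:"catalog_name,omitempty"`
+ // User-provided free-form text description.
+ Comment string `json:"comment,omitempty"`
+ // Time at which this function was created, in epoch milliseconds.
+ CreatedAt int64 `json:"created_at,omitempty"`
+ // Username of function creator.
+ CreatedBy string `json:"created_by,omitempty"`
+ // Scalar function return data type.
+ DataType ColumnTypeName `json:"data_type,omitempty"`
+ // External function language.
+ ExternalLanguage string `json:"external_language,omitempty"`
+ // External function name.
+ ExternalName string `json:"external_name,omitempty"`
+ // Pretty printed function data type.
+ FullDataType string `json:"full_data_type,omitempty"`
+ // Full name of function, in form of
+ // __catalog_name__.__schema_name__.__function_name__
+ FullName string `json:"full_name,omitempty"`
+ // Id of Function, relative to parent schema.
+ FunctionId string `json:"function_id,omitempty"`
+
+ InputParams *FunctionParameterInfos `json:"input_params,omitempty"`
+ // Whether the function is deterministic.
+ IsDeterministic bool `json:"is_deterministic,omitempty"`
+ // Function null call.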
+ IsNullCall bool `json:"is_null_call,omitempty"`
+ // Unique identifier of parent metastore.
+ MetastoreId string `json:"metastore_id,omitempty"`
+ // Name of function, relative to parent schema.
+ Name string `json:"name,omitempty"`
+ // Username of current owner of function.
+ Owner string `json:"owner,omitempty"`
+ // Function parameter style. **S** is the value for SQL.
+ ParameterStyle FunctionInfoParameterStyle `json:"parameter_style,omitempty"`
+ // JSON-serialized key-value pair map, encoded (escaped) as a string.
+ Properties string `json:"properties,omitempty"`
+ // Table function return parameters.
+ ReturnParams *FunctionParameterInfos `json:"return_params,omitempty"`
+ // Function language. When **EXTERNAL** is used, the language of the routine
+ // function should be specified in the __external_language__ field, and the
+ // __return_params__ of the function cannot be used (as **TABLE** return
+ // type is not supported), and the __sql_data_access__ field must be
+ // **NO_SQL**.
+ RoutineBody FunctionInfoRoutineBody `json:"routine_body,omitempty"`
+ // Function body.
+ RoutineDefinition string `json:"routine_definition,omitempty"`
+ // Function dependencies.
+ RoutineDependencies *DependencyList `json:"routine_dependencies,omitempty"`
+ // Name of parent schema relative to its parent catalog.
+ SchemaName string `json:"schema_name,omitempty"`
+ // Function security type.
+ SecurityType FunctionInfoSecurityType `json:"security_type,omitempty"`
+ // Specific name of the function; reserved for future use.
+ SpecificName string `json:"specific_name,omitempty"`
+ // Function SQL data access.
+ SqlDataAccess FunctionInfoSqlDataAccess `json:"sql_data_access,omitempty"`
+ // List of schemas whose objects can be referenced without qualification.
+ SqlPath string `json:"sql_path,omitempty"`
+ // Time at which this function was last modified, in epoch milliseconds.
+ UpdatedAt int64 `json:"updated_at,omitempty"`
+ // Username of user who last modified function.
+ UpdatedBy string `json:"updated_by,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FunctionInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s FunctionInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Function parameter style. **S** is the value for SQL.
+type FunctionInfoParameterStyle string
+
+const FunctionInfoParameterStyleS FunctionInfoParameterStyle = `S`
+
+// String representation for [fmt.Print]
+func (f *FunctionInfoParameterStyle) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *FunctionInfoParameterStyle) Set(v string) error {
+ switch v {
+ case `S`:
+ *f = FunctionInfoParameterStyle(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "S"`, v)
+ }
+}
+
+// Type always returns FunctionInfoParameterStyle to satisfy [pflag.Value] interface
+func (f *FunctionInfoParameterStyle) Type() string {
+ return "FunctionInfoParameterStyle"
+}
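+
+// FunctionInfo carries the function's signature in InputParams. A short
+// sketch walking the parameters of a fetched function (the fn variable and
+// printf wiring are illustrative; FunctionParameterInfo is defined below):
+//
+//    var fn FunctionInfo // e.g. decoded from a get-function response
+//    if fn.InputParams != nil {
+//        for _, p := range fn.InputParams.Parameters {
+//            fmt.Printf("#%d %s %s\n", p.Position, p.Name, p.TypeText)
+//        }
+//    }
+
+// Function language. When **EXTERNAL** is used, the language of the routine
+// function should be specified in the __external_language__ field, and the
+// __return_params__ of the function cannot be used (as **TABLE** return type is
+// not supported), and the __sql_data_access__ field must be **NO_SQL**.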
+type FunctionInfoRoutineBody string + +const FunctionInfoRoutineBodyExternal FunctionInfoRoutineBody = `EXTERNAL` + +const FunctionInfoRoutineBodySql FunctionInfoRoutineBody = `SQL` + +// String representation for [fmt.Print] +func (f *FunctionInfoRoutineBody) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionInfoRoutineBody) Set(v string) error { + switch v { + case `EXTERNAL`, `SQL`: + *f = FunctionInfoRoutineBody(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "SQL"`, v) + } +} + +// Type always returns FunctionInfoRoutineBody to satisfy [pflag.Value] interface +func (f *FunctionInfoRoutineBody) Type() string { + return "FunctionInfoRoutineBody" +} + +// The security type of the function. +type FunctionInfoSecurityType string + +const FunctionInfoSecurityTypeDefiner FunctionInfoSecurityType = `DEFINER` + +// String representation for [fmt.Print] +func (f *FunctionInfoSecurityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionInfoSecurityType) Set(v string) error { + switch v { + case `DEFINER`: + *f = FunctionInfoSecurityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEFINER"`, v) + } +} + +// Type always returns FunctionInfoSecurityType to satisfy [pflag.Value] interface +func (f *FunctionInfoSecurityType) Type() string { + return "FunctionInfoSecurityType" +} + +// Function SQL data access. +type FunctionInfoSqlDataAccess string + +const FunctionInfoSqlDataAccessContainsSql FunctionInfoSqlDataAccess = `CONTAINS_SQL` + +const FunctionInfoSqlDataAccessNoSql FunctionInfoSqlDataAccess = `NO_SQL` + +const FunctionInfoSqlDataAccessReadsSqlData FunctionInfoSqlDataAccess = `READS_SQL_DATA` + +// String representation for [fmt.Print] +func (f *FunctionInfoSqlDataAccess) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionInfoSqlDataAccess) Set(v string) error { + switch v { + case `CONTAINS_SQL`, `NO_SQL`, `READS_SQL_DATA`: + *f = FunctionInfoSqlDataAccess(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONTAINS_SQL", "NO_SQL", "READS_SQL_DATA"`, v) + } +} + +// Type always returns FunctionInfoSqlDataAccess to satisfy [pflag.Value] interface +func (f *FunctionInfoSqlDataAccess) Type() string { + return "FunctionInfoSqlDataAccess" +} + +type FunctionParameterInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Name of parameter. + Name string `json:"name"` + // Default value of the parameter. + ParameterDefault string `json:"parameter_default,omitempty"` + // The mode of the function parameter. + ParameterMode FunctionParameterMode `json:"parameter_mode,omitempty"` + // The type of function parameter. + ParameterType FunctionParameterType `json:"parameter_type,omitempty"` + // Ordinal position of column (starting at position 0). + Position int `json:"position"` + // Format of IntervalType. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // Full data type spec, JSON-serialized. + TypeJson string `json:"type_json,omitempty"` + + TypeName ColumnTypeName `json:"type_name"` + // Digits of precision; required on Create for DecimalTypes. + TypePrecision int `json:"type_precision,omitempty"` + // Digits to right of decimal; Required on Create for DecimalTypes. 
+ TypeScale int `json:"type_scale,omitempty"` + // Full data type spec, SQL/catalogString text. + TypeText string `json:"type_text"` + + ForceSendFields []string `json:"-"` +} + +func (s *FunctionParameterInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FunctionParameterInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FunctionParameterInfos struct { + // The array of __FunctionParameterInfo__ definitions of the function's + // parameters. + Parameters []FunctionParameterInfo `json:"parameters,omitempty"` +} + +// The mode of the function parameter. +type FunctionParameterMode string + +const FunctionParameterModeIn FunctionParameterMode = `IN` + +// String representation for [fmt.Print] +func (f *FunctionParameterMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterMode) Set(v string) error { + switch v { + case `IN`: + *f = FunctionParameterMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN"`, v) + } +} + +// Type always returns FunctionParameterMode to satisfy [pflag.Value] interface +func (f *FunctionParameterMode) Type() string { + return "FunctionParameterMode" +} + +// The type of function parameter. +type FunctionParameterType string + +const FunctionParameterTypeColumn FunctionParameterType = `COLUMN` + +const FunctionParameterTypeParam FunctionParameterType = `PARAM` + +// String representation for [fmt.Print] +func (f *FunctionParameterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterType) Set(v string) error { + switch v { + case `COLUMN`, `PARAM`: + *f = FunctionParameterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COLUMN", "PARAM"`, v) + } +} + +// Type always returns FunctionParameterType to satisfy [pflag.Value] interface +func (f *FunctionParameterType) Type() string { + return "FunctionParameterType" +} + +// GCP temporary credentials for API authentication. Read more at +// https://developers.google.com/identity/protocols/oauth2/service-account +type GcpOauthToken struct { + OauthToken string `json:"oauth_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpOauthToken) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpOauthToken) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The Azure cloud options to customize the requested temporary credential +type GenerateTemporaryServiceCredentialAzureOptions struct { + // The resources to which the temporary Azure credential should apply. These + // resources are the scopes that are passed to the token provider (see + // https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential?view=azure-python) + Resources []string `json:"resources,omitempty"` +} + +// The GCP cloud options to customize the requested temporary credential +type GenerateTemporaryServiceCredentialGcpOptions struct { + // The scopes to which the temporary GCP credential should apply. 
These
+ // are the scopes that are passed to the token provider (see
+ // https://google-auth.readthedocs.io/en/latest/reference/google.auth.html#google.auth.credentials.Credentials)
+ Scopes []string `json:"scopes,omitempty"`
+}
+
+type GenerateTemporaryServiceCredentialRequest struct {
+ // The Azure cloud options to customize the requested temporary credential
+ AzureOptions *GenerateTemporaryServiceCredentialAzureOptions `json:"azure_options,omitempty"`
+ // The name of the service credential used to generate a temporary
+ // credential
+ CredentialName string `json:"credential_name"`
+ // The GCP cloud options to customize the requested temporary credential
+ GcpOptions *GenerateTemporaryServiceCredentialGcpOptions `json:"gcp_options,omitempty"`
+}
+
+type GenerateTemporaryTableCredentialRequest struct {
+ // The operation performed against the table data, either READ or
+ // READ_WRITE. If READ_WRITE is specified, the credentials returned will
+ // have write permissions; otherwise, they will be read-only.
+ Operation TableOperation `json:"operation,omitempty"`
+ // UUID of the table to read or write.
+ TableId string `json:"table_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GenerateTemporaryTableCredentialRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GenerateTemporaryTableCredentialRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type GenerateTemporaryTableCredentialResponse struct {
+ // AWS temporary credentials for API authentication. Read more at
+ // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.
+ AwsTempCredentials *AwsCredentials `json:"aws_temp_credentials,omitempty"`
+ // Azure Active Directory token, essentially the Oauth token for Azure
+ // Service Principal or Managed Identity. Read more at
+ // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token
+ AzureAad *AzureActiveDirectoryToken `json:"azure_aad,omitempty"`
+ // Azure temporary credentials for API authentication. Read more at
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas
+ AzureUserDelegationSas *AzureUserDelegationSas `json:"azure_user_delegation_sas,omitempty"`
+ // Server time when the credential will expire, in epoch milliseconds. The
+ // API client is advised to cache the credential given this expiration time.
+ ExpirationTime int64 `json:"expiration_time,omitempty"`
+ // GCP temporary credentials for API authentication. Read more at
+ // https://developers.google.com/identity/protocols/oauth2/service-account
+ GcpOauthToken *GcpOauthToken `json:"gcp_oauth_token,omitempty"`
+ // R2 temporary credentials for API authentication. Read more at
+ // https://developers.cloudflare.com/r2/api/s3/tokens/.
+ R2TempCredentials *R2Credentials `json:"r2_temp_credentials,omitempty"`
+ // The URL of the storage path accessible by the temporary credential.
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GenerateTemporaryTableCredentialResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GenerateTemporaryTableCredentialResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
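+
+// The temporary-credential response is cloud-specific: only the field for
+// the backing cloud is expected to be populated. A sketch of dispatching on
+// it (the resp variable is illustrative):
+//
+//    var resp GenerateTemporaryTableCredentialResponse
+//    switch {
+//    case resp.AwsTempCredentials != nil:
+//        // use the AWS access key, secret key and session token
+//    case resp.AzureAad != nil:
+//        // use the Azure AD token as a bearer credential
+//    case resp.AzureUserDelegationSas != nil:
+//        // use the SAS token against resp.Url
+//    case resp.GcpOauthToken != nil:
+//        // use the GCP OAuth token as a bearer credential
+//    case resp.R2TempCredentials != nil:
+//        // use the Cloudflare R2 token
+//    }
+
+// Gets the metastore assignment for a workspace
+type GetAccountMetastoreAssignmentRequest struct {
+ // Workspace ID.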
+ WorkspaceId int64 `json:"-" url:"-"` +} + +// Get a metastore +type GetAccountMetastoreRequest struct { + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` +} + +// Gets the named storage credential +type GetAccountStorageCredentialRequest struct { + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` + // Name of the storage credential. + StorageCredentialName string `json:"-" url:"-"` +} + +// Get an artifact allowlist +type GetArtifactAllowlistRequest struct { + // The artifact type of the allowlist. + ArtifactType ArtifactType `json:"-" url:"-"` +} + +// Get securable workspace bindings +type GetBindingsRequest struct { + // Maximum number of workspace bindings to return. - When set to 0, the page + // length is set to a server configured value (recommended); - When set to a + // value greater than 0, the page length is the minimum of this value and a + // server configured value; - When set to a value less than 0, an invalid + // parameter error is returned; - If not set, all the workspace bindings are + // returned (not recommended). + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + // The name of the securable. + SecurableName string `json:"-" url:"-"` + // The type of the securable to bind to a workspace. + SecurableType GetBindingsSecurableType `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetBindingsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetBindingsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetBindingsSecurableType string + +const GetBindingsSecurableTypeCatalog GetBindingsSecurableType = `catalog` + +const GetBindingsSecurableTypeCredential GetBindingsSecurableType = `credential` + +const GetBindingsSecurableTypeExternalLocation GetBindingsSecurableType = `external_location` + +const GetBindingsSecurableTypeStorageCredential GetBindingsSecurableType = `storage_credential` + +// String representation for [fmt.Print] +func (f *GetBindingsSecurableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetBindingsSecurableType) Set(v string) error { + switch v { + case `catalog`, `credential`, `external_location`, `storage_credential`: + *f = GetBindingsSecurableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "catalog", "credential", "external_location", "storage_credential"`, v) + } +} + +// Type always returns GetBindingsSecurableType to satisfy [pflag.Value] interface +func (f *GetBindingsSecurableType) Type() string { + return "GetBindingsSecurableType" +} + +// Get Model Version By Alias +type GetByAliasRequest struct { + // The name of the alias + Alias string `json:"-" url:"-"` + // The three-level (fully qualified) name of the registered model + FullName string `json:"-" url:"-"` + // Whether to include aliases associated with the model version in the + // response + IncludeAliases bool `json:"-" url:"include_aliases,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetByAliasRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetByAliasRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get a catalog +type GetCatalogRequest struct { + // Whether to include catalogs in the response for which the principal can + // 
only access selective metadata
+ IncludeBrowse bool `json:"-" url:"include_browse,omitempty"`
+ // The name of the catalog.
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetCatalogRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetCatalogRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a connection
+type GetConnectionRequest struct {
+ // Name of the connection.
+ Name string `json:"-" url:"-"`
+}
+
+// Get a credential
+type GetCredentialRequest struct {
+ // Name of the credential.
+ NameArg string `json:"-" url:"-"`
+}
+
+// Get effective permissions
+type GetEffectiveRequest struct {
+ // Full name of securable.
+ FullName string `json:"-" url:"-"`
+ // If provided, only the effective permissions for the specified principal
+ // (user or group) are returned.
+ Principal string `json:"-" url:"principal,omitempty"`
+ // Type of securable.
+ SecurableType SecurableType `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetEffectiveRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetEffectiveRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get an external location
+type GetExternalLocationRequest struct {
+ // Whether to include external locations in the response for which the
+ // principal can only access selective metadata
+ IncludeBrowse bool `json:"-" url:"include_browse,omitempty"`
+ // Name of the external location.
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetExternalLocationRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetExternalLocationRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a function
+type GetFunctionRequest struct {
+ // Whether to include functions in the response for which the principal can
+ // only access selective metadata
+ IncludeBrowse bool `json:"-" url:"include_browse,omitempty"`
+ // The fully-qualified name of the function (of the form
+ // __catalog_name__.__schema_name__.__function_name__).
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetFunctionRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetFunctionRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get permissions
+type GetGrantRequest struct {
+ // Full name of securable.
+ FullName string `json:"-" url:"-"`
+ // If provided, only the permissions for the specified principal (user or
+ // group) are returned.
+ Principal string `json:"-" url:"principal,omitempty"`
+ // Type of securable.
+ SecurableType SecurableType `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetGrantRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetGrantRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a metastore
+type GetMetastoreRequest struct {
+ // Unique ID of the metastore.
+ Id string `json:"-" url:"-"`
+}
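+
+// Functions, like tables and models, are addressed by a three-level
+// (fully qualified) name. A small sketch building one for a get call (the
+// catalog, schema and function names are illustrative):
+//
+//    req := GetFunctionRequest{
+//        Name: fmt.Sprintf("%s.%s.%s", "main", "metrics", "revenue"),
+//    }
+
+type GetMetastoreSummaryResponse struct {
+ // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`).
+ Cloud string `json:"cloud,omitempty"`
+ // Time at which this metastore was created, in epoch milliseconds.
+ CreatedAt int64 `json:"created_at,omitempty"`
+ // Username of metastore creator.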
+ CreatedBy string `json:"created_by,omitempty"` + // Unique identifier of the metastore's (Default) Data Access Configuration. + DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"` + // The organization name of a Delta Sharing entity, to be used in + // Databricks-to-Databricks Delta Sharing as the official name. + DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"` + // The lifetime of delta sharing recipient token in seconds. + DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` + // The scope of Delta Sharing enabled for the metastore. + DeltaSharingScope GetMetastoreSummaryResponseDeltaSharingScope `json:"delta_sharing_scope,omitempty"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"` + // Globally unique metastore ID across clouds and regions, of the form + // `cloud:region:metastore_id`. + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + // Unique identifier of metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // The user-specified name of the metastore. + Name string `json:"name,omitempty"` + // The owner of the metastore. + Owner string `json:"owner,omitempty"` + // Privilege model version of the metastore, of the form `major.minor` + // (e.g., `1.0`). + PrivilegeModelVersion string `json:"privilege_model_version,omitempty"` + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). + Region string `json:"region,omitempty"` + // The storage root URL for metastore + StorageRoot string `json:"storage_root,omitempty"` + // UUID of storage credential to access the metastore storage_root. + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` + // Name of the storage credential to access the metastore storage_root. + StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"` + // Time at which the metastore was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified the metastore. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetMetastoreSummaryResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetMetastoreSummaryResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The scope of Delta Sharing enabled for the metastore. 
+type GetMetastoreSummaryResponseDeltaSharingScope string + +const GetMetastoreSummaryResponseDeltaSharingScopeInternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL` + +const GetMetastoreSummaryResponseDeltaSharingScopeInternalAndExternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL_AND_EXTERNAL` + +// String representation for [fmt.Print] +func (f *GetMetastoreSummaryResponseDeltaSharingScope) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetMetastoreSummaryResponseDeltaSharingScope) Set(v string) error { + switch v { + case `INTERNAL`, `INTERNAL_AND_EXTERNAL`: + *f = GetMetastoreSummaryResponseDeltaSharingScope(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v) + } +} + +// Type always returns GetMetastoreSummaryResponseDeltaSharingScope to satisfy [pflag.Value] interface +func (f *GetMetastoreSummaryResponseDeltaSharingScope) Type() string { + return "GetMetastoreSummaryResponseDeltaSharingScope" +} + +// Get a Model Version +type GetModelVersionRequest struct { + // The three-level (fully qualified) name of the model version + FullName string `json:"-" url:"-"` + // Whether to include aliases associated with the model version in the + // response + IncludeAliases bool `json:"-" url:"include_aliases,omitempty"` + // Whether to include model versions in the response for which the principal + // can only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // The integer version number of the model version + Version int `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetModelVersionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetModelVersionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get an Online Table +type GetOnlineTableRequest struct { + // Full three-part (catalog, schema, table) name of the table. + Name string `json:"-" url:"-"` +} + +// Get a table monitor +type GetQualityMonitorRequest struct { + // Full name of the table. + TableName string `json:"-" url:"-"` +} + +// Get information for a single resource quota. +type GetQuotaRequest struct { + // Full name of the parent resource. Provide the metastore ID if the parent + // is a metastore. + ParentFullName string `json:"-" url:"-"` + // Securable type of the quota parent. + ParentSecurableType string `json:"-" url:"-"` + // Name of the quota. Follows the pattern of the quota type, with "-quota" + // added as a suffix. + QuotaName string `json:"-" url:"-"` +} + +type GetQuotaResponse struct { + // The returned QuotaInfo. + QuotaInfo *QuotaInfo `json:"quota_info,omitempty"` +} + +// Get refresh +type GetRefreshRequest struct { + // ID of the refresh. + RefreshId string `json:"-" url:"-"` + // Full name of the table. 
+ TableName string `json:"-" url:"-"` +} + +// Get a Registered Model +type GetRegisteredModelRequest struct { + // The three-level (fully qualified) name of the registered model + FullName string `json:"-" url:"-"` + // Whether to include registered model aliases in the response + IncludeAliases bool `json:"-" url:"include_aliases,omitempty"` + // Whether to include registered models in the response for which the + // principal can only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetRegisteredModelRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetRegisteredModelRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get a schema +type GetSchemaRequest struct { + // Full name of the schema. + FullName string `json:"-" url:"-"` + // Whether to include schemas in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetSchemaRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetSchemaRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get a credential +type GetStorageCredentialRequest struct { + // Name of the storage credential. + Name string `json:"-" url:"-"` +} + +// Get a table +type GetTableRequest struct { + // Full name of the table. + FullName string `json:"-" url:"-"` + // Whether to include tables in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Whether delta metadata should be included in the response. + IncludeDeltaMetadata bool `json:"-" url:"include_delta_metadata,omitempty"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities bool `json:"-" url:"include_manifest_capabilities,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetTableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetTableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get catalog workspace bindings +type GetWorkspaceBindingRequest struct { + // The name of the catalog. + Name string `json:"-" url:"-"` +} + +type IsolationMode string + +const IsolationModeIsolationModeIsolated IsolationMode = `ISOLATION_MODE_ISOLATED` + +const IsolationModeIsolationModeOpen IsolationMode = `ISOLATION_MODE_OPEN` + +// String representation for [fmt.Print] +func (f *IsolationMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *IsolationMode) Set(v string) error { + switch v { + case `ISOLATION_MODE_ISOLATED`, `ISOLATION_MODE_OPEN`: + *f = IsolationMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ISOLATION_MODE_ISOLATED", "ISOLATION_MODE_OPEN"`, v) + } +} + +// Type always returns IsolationMode to satisfy [pflag.Value] interface +func (f *IsolationMode) Type() string { + return "IsolationMode" +} + +// Get all workspaces assigned to a metastore +type ListAccountMetastoreAssignmentsRequest struct { + // Unity Catalog metastore ID + MetastoreId string `json:"-" url:"-"` +} + +// The list of workspaces to which the given metastore is assigned. 
+type ListAccountMetastoreAssignmentsResponse struct {
+ WorkspaceIds []int64 `json:"workspace_ids,omitempty"`
+}
+
+// Get all storage credentials assigned to a metastore
+type ListAccountStorageCredentialsRequest struct {
+ // Unity Catalog metastore ID
+ MetastoreId string `json:"-" url:"-"`
+}
+
+type ListAccountStorageCredentialsResponse struct {
+ // An array of metastore storage credentials.
+ StorageCredentials []StorageCredentialInfo `json:"storage_credentials,omitempty"`
+}
+
+// List catalogs
+type ListCatalogsRequest struct {
+ // Whether to include catalogs in the response for which the principal can
+ // only access selective metadata
+ IncludeBrowse bool `json:"-" url:"include_browse,omitempty"`
+ // Maximum number of catalogs to return. - when set to 0, the page length is
+ // set to a server configured value (recommended); - when set to a value
+ // greater than 0, the page length is the minimum of this value and a server
+ // configured value; - when set to a value less than 0, an invalid parameter
+ // error is returned; - if not set, all valid catalogs are returned (not
+ // recommended). - Note: The number of returned catalogs might be less than
+ // the specified max_results size, even zero. The only definitive indication
+ // that no further catalogs can be fetched is when the next_page_token is
+ // unset from the response.
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // Opaque pagination token to go to next page based on previous query.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListCatalogsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListCatalogsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListCatalogsResponse struct {
+ // An array of catalog information objects.
+ Catalogs []CatalogInfo `json:"catalogs,omitempty"`
+ // Opaque token to retrieve the next page of results. Absent if there are no
+ // more pages. __page_token__ should be set to this value for the next
+ // request (for the next page of results).
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListCatalogsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListCatalogsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
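+
+// The list endpoints in this package share one pagination contract: pass
+// the returned next_page_token back as page_token until it comes back
+// empty. A sketch against the catalogs listing (listCatalogs stands in for
+// the generated client method and is an assumption, not part of this
+// patch):
+//
+//    req := ListCatalogsRequest{MaxResults: 0} // 0: server-chosen page size
+//    for {
+//        resp, err := listCatalogs(ctx, req)
+//        if err != nil {
+//            return err
+//        }
+//        for _, c := range resp.Catalogs {
+//            fmt.Println(c.Name) // CatalogInfo is defined earlier in this file
+//        }
+//        if resp.NextPageToken == "" {
+//            break
+//        }
+//        req.PageToken = resp.NextPageToken
+//    }
+
+// List connections
+type ListConnectionsRequest struct {
+ // Maximum number of connections to return. - if not set, all connections
+ // are returned (not recommended); - when set to a value greater than 0, the
+ // page length is the minimum of this value and a server configured value; -
+ // when set to 0, the page length is set to a server configured value
+ // (recommended); - when set to a value less than 0, an invalid parameter
+ // error is returned;
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // Opaque pagination token to go to next page based on previous query.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListConnectionsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListConnectionsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListConnectionsResponse struct {
+ // An array of connection information objects.
+ Connections []ConnectionInfo `json:"connections,omitempty"`
+ // Opaque token to retrieve the next page of results.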
Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListConnectionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListConnectionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List credentials +type ListCredentialsRequest struct { + // Maximum number of credentials to return. - If not set, the default max + // page size is used. - When set to a value greater than 0, the page length + // is the minimum of this value and a server-configured value. - When set to + // 0, the page length is set to a server-configured value (recommended). - + // When set to a value less than 0, an invalid parameter error is returned. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque token to retrieve the next page of results. + PageToken string `json:"-" url:"page_token,omitempty"` + // Return only credentials for the specified purpose. + Purpose CredentialPurpose `json:"-" url:"purpose,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCredentialsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCredentialsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListCredentialsResponse struct { + Credentials []CredentialInfo `json:"credentials,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCredentialsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCredentialsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List external locations +type ListExternalLocationsRequest struct { + // Whether to include external locations in the response for which the + // principal can only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Maximum number of external locations to return. If not set, all the + // external locations are returned (not recommended). - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to 0, the page length is set to a server + // configured value (recommended); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExternalLocationsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExternalLocationsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListExternalLocationsResponse struct { + // An array of external locations. + ExternalLocations []ExternalLocationInfo `json:"external_locations,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). 
+ NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExternalLocationsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExternalLocationsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List functions +type ListFunctionsRequest struct { + // Name of parent catalog for functions of interest. + CatalogName string `json:"-" url:"catalog_name"` + // Whether to include functions in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Maximum number of functions to return. If not set, all the functions are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + // Parent schema of functions. + SchemaName string `json:"-" url:"schema_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFunctionsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFunctionsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListFunctionsResponse struct { + // An array of function information objects. + Functions []FunctionInfo `json:"functions,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFunctionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFunctionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListMetastoresResponse struct { + // An array of metastore information objects. + Metastores []MetastoreInfo `json:"metastores,omitempty"` +} + +// List Model Versions +type ListModelVersionsRequest struct { + // The full three-level name of the registered model under which to list + // model versions + FullName string `json:"-" url:"-"` + // Whether to include model versions in the response for which the principal + // can only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Maximum number of model versions to return. If not set, the page length + // is set to a server configured value (100, as of 1/3/2024). - when set to + // a value greater than 0, the page length is the minimum of this value and + // a server configured value(1000, as of 1/3/2024); - when set to 0, the + // page length is set to a server configured value (100, as of 1/3/2024) + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. 
+ PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListModelVersionsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListModelVersionsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListModelVersionsResponse struct { + ModelVersions []ModelVersionInfo `json:"model_versions,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListModelVersionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListModelVersionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List all resource quotas under a metastore. +type ListQuotasRequest struct { + // The number of quotas to return. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque token for the next page of results. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListQuotasRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListQuotasRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListQuotasResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request. + NextPageToken string `json:"next_page_token,omitempty"` + // An array of returned QuotaInfos. + Quotas []QuotaInfo `json:"quotas,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListQuotasResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListQuotasResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List refreshes +type ListRefreshesRequest struct { + // Full name of the table. + TableName string `json:"-" url:"-"` +} + +// List Registered Models +type ListRegisteredModelsRequest struct { + // The identifier of the catalog under which to list registered models. If + // specified, schema_name must be specified. + CatalogName string `json:"-" url:"catalog_name,omitempty"` + // Whether to include registered models in the response for which the + // principal can only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Max number of registered models to return. + // + // If both catalog and schema are specified: - when max_results is not + // specified, the page length is set to a server configured value (10000, as + // of 4/2/2024). - when set to a value greater than 0, the page length is + // the minimum of this value and a server configured value (10000, as of + // 4/2/2024); - when set to 0, the page length is set to a server configured + // value (10000, as of 4/2/2024); - when set to a value less than 0, an + // invalid parameter error is returned; + // + // If neither schema nor catalog is specified: - when max_results is not + // specified, the page length is set to a server configured value (100, as + // of 4/2/2024). 
- when set to a value greater than 0, the page length is + // the minimum of this value and a server configured value (1000, as of + // 4/2/2024); - when set to 0, the page length is set to a server configured + // value (100, as of 4/2/2024); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque token to send for the next page of results (pagination). + PageToken string `json:"-" url:"page_token,omitempty"` + // The identifier of the schema under which to list registered models. If + // specified, catalog_name must be specified. + SchemaName string `json:"-" url:"schema_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRegisteredModelsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRegisteredModelsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListRegisteredModelsResponse struct { + // Opaque token for pagination. Omitted if there are no more results. + // page_token should be set to this value for fetching the next page. + NextPageToken string `json:"next_page_token,omitempty"` + + RegisteredModels []RegisteredModelInfo `json:"registered_models,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRegisteredModelsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRegisteredModelsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List schemas +type ListSchemasRequest struct { + // Parent catalog for schemas of interest. + CatalogName string `json:"-" url:"catalog_name"` + // Whether to include schemas in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Maximum number of schemas to return. If not set, all the schemas are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSchemasRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSchemasRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListSchemasResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of schema information objects. + Schemas []SchemaInfo `json:"schemas,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSchemasResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSchemasResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List credentials +type ListStorageCredentialsRequest struct { + // Maximum number of storage credentials to return. If not set, all the + // storage credentials are returned (not recommended). 
- when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to 0, the page length is set to a server + // configured value (recommended); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListStorageCredentialsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListStorageCredentialsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListStorageCredentialsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + StorageCredentials []StorageCredentialInfo `json:"storage_credentials,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListStorageCredentialsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListStorageCredentialsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List table summaries +type ListSummariesRequest struct { + // Name of parent catalog for tables of interest. + CatalogName string `json:"-" url:"catalog_name"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities bool `json:"-" url:"include_manifest_capabilities,omitempty"` + // Maximum number of summaries for tables to return. If not set, the page + // length is set to a server configured value (10000, as of 1/5/2024). - + // when set to a value greater than 0, the page length is the minimum of + // this value and a server configured value (10000, as of 1/5/2024); - when + // set to 0, the page length is set to a server configured value (10000, as + // of 1/5/2024) (recommended); - when set to a value less than 0, an invalid + // parameter error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + // A sql LIKE pattern (% and _) for schema names. All schemas will be + // returned if not set or empty. + SchemaNamePattern string `json:"-" url:"schema_name_pattern,omitempty"` + // A sql LIKE pattern (% and _) for table names. All tables will be returned + // if not set or empty. + TableNamePattern string `json:"-" url:"table_name_pattern,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSummariesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSummariesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List system schemas +type ListSystemSchemasRequest struct { + // Maximum number of schemas to return. - When set to 0, the page length is + // set to a server configured value (recommended); - When set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - When set to a value less than 0, an invalid parameter + // error is returned; - If not set, all the schemas are returned (not + // recommended). 
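The max_results convention repeated across these request types (0 = server default, greater than 0 = the minimum of the value and the server cap, less than 0 = invalid parameter, unset = all results) can be checked client-side before issuing the call. A small sketch of such a guard, not part of the generated API:

    // validateMaxResults rejects negative values locally rather than
    // round-tripping to the server for an invalid-parameter error.
    func validateMaxResults(n int) error {
        if n < 0 {
            return fmt.Errorf("max_results must be >= 0, got %d", n)
        }
        return nil
    }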
+ MaxResults int `json:"-" url:"max_results,omitempty"` + // The ID for the metastore in which the system schema resides. + MetastoreId string `json:"-" url:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSystemSchemasRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSystemSchemasRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListSystemSchemasResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of system schema information objects. + Schemas []SystemSchemaInfo `json:"schemas,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSystemSchemasResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSystemSchemasResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListTableSummariesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // List of table summaries. + Tables []TableSummary `json:"tables,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListTableSummariesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListTableSummariesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List tables +type ListTablesRequest struct { + // Name of parent catalog for tables of interest. + CatalogName string `json:"-" url:"catalog_name"` + // Whether to include tables in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Whether delta metadata should be included in the response. + IncludeDeltaMetadata bool `json:"-" url:"include_delta_metadata,omitempty"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities bool `json:"-" url:"include_manifest_capabilities,omitempty"` + // Maximum number of tables to return. If not set, all the tables are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults int `json:"-" url:"max_results,omitempty"` + // Whether to omit the columns of the table from the response or not. + OmitColumns bool `json:"-" url:"omit_columns,omitempty"` + // Whether to omit the properties of the table from the response or not. + OmitProperties bool `json:"-" url:"omit_properties,omitempty"` + // Whether to omit the username of the table (e.g. owner, updated_by, + // created_by) from the response or not. + OmitUsername bool `json:"-" url:"omit_username,omitempty"` + // Opaque token to send for the next page of results (pagination). + PageToken string `json:"-" url:"page_token,omitempty"` + // Parent schema of tables. 
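For large schemas the Omit* flags on ListTablesRequest cut the payload down to table identities. A trimmed listing request could look like this (catalog and schema names are placeholders):

    req := ListTablesRequest{
        CatalogName:    "main",
        SchemaName:     "default",
        OmitColumns:    true, // skip column metadata
        OmitProperties: true, // skip table properties
        OmitUsername:   true, // skip owner/created_by/updated_by
    }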
+ SchemaName string `json:"-" url:"schema_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListTablesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListTablesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListTablesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of table information objects. + Tables []TableInfo `json:"tables,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListTablesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListTablesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List Volumes +type ListVolumesRequest struct { + // The identifier of the catalog + CatalogName string `json:"-" url:"catalog_name"` + // Whether to include volumes in the response for which the principal can + // only access selective metadata for + IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` + // Maximum number of volumes to return (page length). + // + // If not set, the page length is set to a server configured value (10000, + // as of 1/29/2024). - when set to a value greater than 0, the page length + // is the minimum of this value and a server configured value (10000, as of + // 1/29/2024); - when set to 0, the page length is set to a server + // configured value (10000, as of 1/29/2024) (recommended); - when set to a + // value less than 0, an invalid parameter error is returned; + // + // Note: this parameter controls only the maximum number of volumes to + // return. The actual number of volumes returned in a page may be smaller + // than this value, including 0, even if there are more pages. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque token returned by a previous request. It must be included in the + // request to retrieve the next page of results (pagination). + PageToken string `json:"-" url:"page_token,omitempty"` + // The identifier of the schema + SchemaName string `json:"-" url:"schema_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListVolumesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListVolumesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListVolumesResponseContent struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request to retrieve the next page of results. 
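As the max_results comment above warns, a page may legally contain zero volumes even when more pages remain, so termination must key off NextPageToken, not off an empty Volumes slice. A sketch, with the single-page call passed in as a function value since the generated client is not part of this hunk:

    func allVolumes(ctx context.Context,
        list func(context.Context, ListVolumesRequest) (*ListVolumesResponseContent, error),
    ) ([]VolumeInfo, error) {
        req := ListVolumesRequest{CatalogName: "main", SchemaName: "default"} // placeholders
        var out []VolumeInfo
        for {
            page, err := list(ctx, req)
            if err != nil {
                return nil, err
            }
            out = append(out, page.Volumes...)
            // Do not stop on len(page.Volumes) == 0; empty pages are legal.
            if page.NextPageToken == "" {
                return out, nil
            }
            req.PageToken = page.NextPageToken
        }
    }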
+ NextPageToken string `json:"next_page_token,omitempty"` + + Volumes []VolumeInfo `json:"volumes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListVolumesResponseContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListVolumesResponseContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The artifact pattern matching type +type MatchType string + +const MatchTypePrefixMatch MatchType = `PREFIX_MATCH` + +// String representation for [fmt.Print] +func (f *MatchType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MatchType) Set(v string) error { + switch v { + case `PREFIX_MATCH`: + *f = MatchType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PREFIX_MATCH"`, v) + } +} + +// Type always returns MatchType to satisfy [pflag.Value] interface +func (f *MatchType) Type() string { + return "MatchType" +} + +type MetastoreAssignment struct { + // The name of the default catalog in the metastore. + DefaultCatalogName string `json:"default_catalog_name,omitempty"` + // The unique ID of the metastore. + MetastoreId string `json:"metastore_id"` + // The unique ID of the Databricks workspace. + WorkspaceId int64 `json:"workspace_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *MetastoreAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetastoreAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MetastoreInfo struct { + // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`). + Cloud string `json:"cloud,omitempty"` + // Time at which this metastore was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of metastore creator. + CreatedBy string `json:"created_by,omitempty"` + // Unique identifier of the metastore's (Default) Data Access Configuration. + DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"` + // The organization name of a Delta Sharing entity, to be used in + // Databricks-to-Databricks Delta Sharing as the official name. + DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"` + // The lifetime of delta sharing recipient token in seconds. + DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` + // The scope of Delta Sharing enabled for the metastore. + DeltaSharingScope MetastoreInfoDeltaSharingScope `json:"delta_sharing_scope,omitempty"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled bool `json:"external_access_enabled,omitempty"` + // Globally unique metastore ID across clouds and regions, of the form + // `cloud:region:metastore_id`. + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + // Unique identifier of metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // The user-specified name of the metastore. + Name string `json:"name,omitempty"` + // The owner of the metastore. + Owner string `json:"owner,omitempty"` + // Privilege model version of the metastore, of the form `major.minor` + // (e.g., `1.0`). + PrivilegeModelVersion string `json:"privilege_model_version,omitempty"` + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). 
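Every enum in this file implements [pflag.Value] through the String/Set/Type triple, so it can back a CLI flag directly. A small sketch assuming github.com/spf13/pflag is imported, placed inside a command's setup code:

    var mt MatchType
    fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    fs.Var(&mt, "match-type", "artifact match type (PREFIX_MATCH)")
    if err := fs.Parse([]string{"--match-type", "PREFIX_MATCH"}); err != nil {
        log.Fatal(err) // Set rejects anything outside the allowed values
    }
    fmt.Println(mt.String()) // PREFIX_MATCH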
+ Region string `json:"region,omitempty"` + // The storage root URL for metastore + StorageRoot string `json:"storage_root,omitempty"` + // UUID of storage credential to access the metastore storage_root. + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` + // Name of the storage credential to access the metastore storage_root. + StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"` + // Time at which the metastore was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified the metastore. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MetastoreInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetastoreInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The scope of Delta Sharing enabled for the metastore. +type MetastoreInfoDeltaSharingScope string + +const MetastoreInfoDeltaSharingScopeInternal MetastoreInfoDeltaSharingScope = `INTERNAL` + +const MetastoreInfoDeltaSharingScopeInternalAndExternal MetastoreInfoDeltaSharingScope = `INTERNAL_AND_EXTERNAL` + +// String representation for [fmt.Print] +func (f *MetastoreInfoDeltaSharingScope) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MetastoreInfoDeltaSharingScope) Set(v string) error { + switch v { + case `INTERNAL`, `INTERNAL_AND_EXTERNAL`: + *f = MetastoreInfoDeltaSharingScope(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v) + } +} + +// Type always returns MetastoreInfoDeltaSharingScope to satisfy [pflag.Value] interface +func (f *MetastoreInfoDeltaSharingScope) Type() string { + return "MetastoreInfoDeltaSharingScope" +} + +type ModelVersionInfo struct { + // List of aliases associated with the model version + Aliases []RegisteredModelAlias `json:"aliases,omitempty"` + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. 
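The ForceSendFields []string `json:"-"` escape hatch that recurs throughout this file exists because ,omitempty would otherwise drop zero values; listing a field name there forces it onto the wire. A sketch using MetastoreAssignment (the IDs are placeholders, and the behavior assumes the marshal package honors ForceSendFields, as its use throughout this file implies):

    a := MetastoreAssignment{
        MetastoreId:        "11111111-2222-3333-4444-555555555555", // placeholder
        WorkspaceId:        123456,                                 // placeholder
        DefaultCatalogName: "",
        // Without this, the empty string would be omitted entirely.
        ForceSendFields: []string{"DefaultCatalogName"},
    }
    b, err := a.MarshalJSON()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(b)) // default_catalog_name appears explicitly as ""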
+ BrowseOnly bool `json:"browse_only,omitempty"` + // The name of the catalog containing the model version + CatalogName string `json:"catalog_name,omitempty"` + // The comment attached to the model version + Comment string `json:"comment,omitempty"` + + CreatedAt int64 `json:"created_at,omitempty"` + // The identifier of the user who created the model version + CreatedBy string `json:"created_by,omitempty"` + // The unique identifier of the model version + Id string `json:"id,omitempty"` + // The unique identifier of the metastore containing the model version + MetastoreId string `json:"metastore_id,omitempty"` + // The name of the parent registered model of the model version, relative to + // parent schema + ModelName string `json:"model_name,omitempty"` + // Model version dependencies, for feature-store packaged models + ModelVersionDependencies *DependencyList `json:"model_version_dependencies,omitempty"` + // MLflow run ID used when creating the model version, if ``source`` was + // generated by an experiment run stored in an MLflow tracking server + RunId string `json:"run_id,omitempty"` + // ID of the Databricks workspace containing the MLflow run that generated + // this model version, if applicable + RunWorkspaceId int `json:"run_workspace_id,omitempty"` + // The name of the schema containing the model version, relative to parent + // catalog + SchemaName string `json:"schema_name,omitempty"` + // URI indicating the location of the source artifacts (files) for the model + // version + Source string `json:"source,omitempty"` + // Current status of the model version. Newly created model versions start + // in PENDING_REGISTRATION status, then move to READY status once the model + // version files are uploaded and the model version is finalized. Only model + // versions in READY status can be loaded for inference or served. + Status ModelVersionInfoStatus `json:"status,omitempty"` + // The storage location on the cloud under which model version data files + // are stored + StorageLocation string `json:"storage_location,omitempty"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + // The identifier of the user who updated the model version last time + UpdatedBy string `json:"updated_by,omitempty"` + // Integer model version number, used to reference the model version in API + // requests. + Version int `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelVersionInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelVersionInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Current status of the model version. Newly created model versions start in +// PENDING_REGISTRATION status, then move to READY status once the model version +// files are uploaded and the model version is finalized. Only model versions in +// READY status can be loaded for inference or served. 
+type ModelVersionInfoStatus string + +const ModelVersionInfoStatusFailedRegistration ModelVersionInfoStatus = `FAILED_REGISTRATION` + +const ModelVersionInfoStatusPendingRegistration ModelVersionInfoStatus = `PENDING_REGISTRATION` + +const ModelVersionInfoStatusReady ModelVersionInfoStatus = `READY` + +// String representation for [fmt.Print] +func (f *ModelVersionInfoStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ModelVersionInfoStatus) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = ModelVersionInfoStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns ModelVersionInfoStatus to satisfy [pflag.Value] interface +func (f *ModelVersionInfoStatus) Type() string { + return "ModelVersionInfoStatus" +} + +type MonitorCronSchedule struct { + // Read-only field that indicates whether a schedule is paused or not. + PauseStatus MonitorCronSchedulePauseStatus `json:"pause_status,omitempty"` + // The expression that determines when to run the monitor. See [examples]. + // + // [examples]: https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression string `json:"quartz_cron_expression"` + // The timezone id (e.g., ``"PST"``) in which to evaluate the quartz + // expression. + TimezoneId string `json:"timezone_id"` +} + +// Read-only field that indicates whether a schedule is paused or not. +type MonitorCronSchedulePauseStatus string + +const MonitorCronSchedulePauseStatusPaused MonitorCronSchedulePauseStatus = `PAUSED` + +const MonitorCronSchedulePauseStatusUnpaused MonitorCronSchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *MonitorCronSchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorCronSchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = MonitorCronSchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns MonitorCronSchedulePauseStatus to satisfy [pflag.Value] interface +func (f *MonitorCronSchedulePauseStatus) Type() string { + return "MonitorCronSchedulePauseStatus" +} + +type MonitorDataClassificationConfig struct { + // Whether data classification is enabled. + Enabled bool `json:"enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MonitorDataClassificationConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MonitorDataClassificationConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MonitorDestination struct { + // The list of email addresses to send the notification to. A maximum of 5 + // email addresses is supported. + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type MonitorInferenceLog struct { + // Granularities for aggregating data into time windows based on their + // timestamp. Currently the following static granularities are supported: + // {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``"<n> + // week(s)"``, ``"1 month"``, ``"1 year"``}. + Granularities []string `json:"granularities"` + // Optional column that contains the ground truth for the prediction.
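Only QuartzCronExpression and TimezoneId above are required (neither tag carries ,omitempty); PauseStatus is read-only and set by the server. A daily schedule might look like this sketch:

    sched := MonitorCronSchedule{
        QuartzCronExpression: "0 0 0 * * ?", // midnight daily, Quartz syntax
        TimezoneId:           "UTC",
    }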
+ LabelCol string `json:"label_col,omitempty"` + // Column that contains the id of the model generating the predictions. + // Metrics will be computed per model id by default, and also across all + // model ids. + ModelIdCol string `json:"model_id_col"` + // Column that contains the output/prediction from the model. + PredictionCol string `json:"prediction_col"` + // Optional column that contains the prediction probabilities for each class + // in a classification problem type. The values in this column should be a + // map, mapping each class label to the prediction probability for a given + // sample. The map should be of PySpark MapType(). + PredictionProbaCol string `json:"prediction_proba_col,omitempty"` + // Problem type the model aims to solve. Determines the type of + // model-quality metrics that will be computed. + ProblemType MonitorInferenceLogProblemType `json:"problem_type"` + // Column that contains the timestamps of requests. The column must be one + // of the following: - A ``TimestampType`` column - A column whose values + // can be converted to timestamps through the pyspark ``to_timestamp`` + // [function]. + // + // [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html + TimestampCol string `json:"timestamp_col"` + + ForceSendFields []string `json:"-"` +} + +func (s *MonitorInferenceLog) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MonitorInferenceLog) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Problem type the model aims to solve. Determines the type of model-quality +// metrics that will be computed. +type MonitorInferenceLogProblemType string + +const MonitorInferenceLogProblemTypeProblemTypeClassification MonitorInferenceLogProblemType = `PROBLEM_TYPE_CLASSIFICATION` + +const MonitorInferenceLogProblemTypeProblemTypeRegression MonitorInferenceLogProblemType = `PROBLEM_TYPE_REGRESSION` + +// String representation for [fmt.Print] +func (f *MonitorInferenceLogProblemType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorInferenceLogProblemType) Set(v string) error { + switch v { + case `PROBLEM_TYPE_CLASSIFICATION`, `PROBLEM_TYPE_REGRESSION`: + *f = MonitorInferenceLogProblemType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PROBLEM_TYPE_CLASSIFICATION", "PROBLEM_TYPE_REGRESSION"`, v) + } +} + +// Type always returns MonitorInferenceLogProblemType to satisfy [pflag.Value] interface +func (f *MonitorInferenceLogProblemType) Type() string { + return "MonitorInferenceLogProblemType" +} + +type MonitorInfo struct { + // The directory to store monitoring assets (e.g. dashboard, metric tables). + AssetsDir string `json:"assets_dir,omitempty"` + // Name of the baseline table from which drift metrics are computed from. + // Columns in the monitored table should also be present in the baseline + // table. + BaselineTableName string `json:"baseline_table_name,omitempty"` + // Custom metrics to compute on the monitored table. These can be aggregate + // metrics, derived metrics (from already computed aggregate metrics), or + // drift metrics (comparing metrics across time windows). + CustomMetrics []MonitorMetric `json:"custom_metrics,omitempty"` + // Id of dashboard that visualizes the computed metrics. This can be empty + // if the monitor is in PENDING state. 
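Pulling the required fields of MonitorInferenceLog together: a classification monitor needs granularities, the model/prediction/timestamp columns, and a problem type. A sketch, with the column names as placeholders:

    infer := MonitorInferenceLog{
        Granularities: []string{"1 day"},
        ModelIdCol:    "model_id",
        PredictionCol: "prediction",
        TimestampCol:  "ts",
        ProblemType:   MonitorInferenceLogProblemTypeProblemTypeClassification,
        LabelCol:      "label", // optional ground-truth column
    }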
+ DashboardId string `json:"dashboard_id,omitempty"` + // The data classification config for the monitor. + DataClassificationConfig *MonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + // The full name of the drift metrics table. Format: + // __catalog_name__.__schema_name__.__table_name__. + DriftMetricsTableName string `json:"drift_metrics_table_name"` + // Configuration for monitoring inference logs. + InferenceLog *MonitorInferenceLog `json:"inference_log,omitempty"` + // The latest failure message of the monitor (if any). + LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"` + // The version of the monitor config (e.g. 1,2,3). If negative, the monitor + // may be corrupted. + MonitorVersion string `json:"monitor_version"` + // The notification settings for the monitor. + Notifications *MonitorNotifications `json:"notifications,omitempty"` + // Schema where output metric tables are created. + OutputSchemaName string `json:"output_schema_name,omitempty"` + // The full name of the profile metrics table. Format: + // __catalog_name__.__schema_name__.__table_name__. + ProfileMetricsTableName string `json:"profile_metrics_table_name"` + // The schedule for automatically updating and refreshing metric tables. + Schedule *MonitorCronSchedule `json:"schedule,omitempty"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []string `json:"slicing_exprs,omitempty"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `json:"snapshot,omitempty"` + // The status of the monitor. + Status MonitorInfoStatus `json:"status"` + // The full name of the table to monitor. Format: + // __catalog_name__.__schema_name__.__table_name__. + TableName string `json:"table_name"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `json:"time_series,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MonitorInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MonitorInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The status of the monitor. 
+type MonitorInfoStatus string + +const MonitorInfoStatusMonitorStatusActive MonitorInfoStatus = `MONITOR_STATUS_ACTIVE` + +const MonitorInfoStatusMonitorStatusDeletePending MonitorInfoStatus = `MONITOR_STATUS_DELETE_PENDING` + +const MonitorInfoStatusMonitorStatusError MonitorInfoStatus = `MONITOR_STATUS_ERROR` + +const MonitorInfoStatusMonitorStatusFailed MonitorInfoStatus = `MONITOR_STATUS_FAILED` + +const MonitorInfoStatusMonitorStatusPending MonitorInfoStatus = `MONITOR_STATUS_PENDING` + +// String representation for [fmt.Print] +func (f *MonitorInfoStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorInfoStatus) Set(v string) error { + switch v { + case `MONITOR_STATUS_ACTIVE`, `MONITOR_STATUS_DELETE_PENDING`, `MONITOR_STATUS_ERROR`, `MONITOR_STATUS_FAILED`, `MONITOR_STATUS_PENDING`: + *f = MonitorInfoStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MONITOR_STATUS_ACTIVE", "MONITOR_STATUS_DELETE_PENDING", "MONITOR_STATUS_ERROR", "MONITOR_STATUS_FAILED", "MONITOR_STATUS_PENDING"`, v) + } +} + +// Type always returns MonitorInfoStatus to satisfy [pflag.Value] interface +func (f *MonitorInfoStatus) Type() string { + return "MonitorInfoStatus" +} + +type MonitorMetric struct { + // Jinja template for a SQL expression that specifies how to compute the + // metric. See [create metric definition]. + // + // [create metric definition]: https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition + Definition string `json:"definition"` + // A list of column names in the input table the metric should be computed + // for. Can use ``":table"`` to indicate that the metric needs information + // from multiple columns. + InputColumns []string `json:"input_columns"` + // Name of the metric in the output tables. + Name string `json:"name"` + // The output type of the custom metric. + OutputDataType string `json:"output_data_type"` + // Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, + // ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. The + // ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` + // metrics are computed on a single table, whereas the + // ``"CUSTOM_METRIC_TYPE_DRIFT"`` compares metrics across baseline and input + // table, or across the two consecutive time windows. - + // CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your + // table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed + // aggregate metrics - CUSTOM_METRIC_TYPE_DRIFT: depend on previously + // computed aggregate or derived metrics + Type MonitorMetricType `json:"type"` +} + +// Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``, +// ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. The +// ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"`` +// metrics are computed on a single table, whereas the +// ``"CUSTOM_METRIC_TYPE_DRIFT"`` compares metrics across baseline and input +// table, or across the two consecutive time windows.
- +// CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your +// table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate +// metrics - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate +// or derived metrics +type MonitorMetricType string + +const MonitorMetricTypeCustomMetricTypeAggregate MonitorMetricType = `CUSTOM_METRIC_TYPE_AGGREGATE` + +const MonitorMetricTypeCustomMetricTypeDerived MonitorMetricType = `CUSTOM_METRIC_TYPE_DERIVED` + +const MonitorMetricTypeCustomMetricTypeDrift MonitorMetricType = `CUSTOM_METRIC_TYPE_DRIFT` + +// String representation for [fmt.Print] +func (f *MonitorMetricType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorMetricType) Set(v string) error { + switch v { + case `CUSTOM_METRIC_TYPE_AGGREGATE`, `CUSTOM_METRIC_TYPE_DERIVED`, `CUSTOM_METRIC_TYPE_DRIFT`: + *f = MonitorMetricType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CUSTOM_METRIC_TYPE_AGGREGATE", "CUSTOM_METRIC_TYPE_DERIVED", "CUSTOM_METRIC_TYPE_DRIFT"`, v) + } +} + +// Type always returns MonitorMetricType to satisfy [pflag.Value] interface +func (f *MonitorMetricType) Type() string { + return "MonitorMetricType" +} + +type MonitorNotifications struct { + // Who to send notifications to on monitor failure. + OnFailure *MonitorDestination `json:"on_failure,omitempty"` + // Who to send notifications to when new data classification tags are + // detected. + OnNewClassificationTagDetected *MonitorDestination `json:"on_new_classification_tag_detected,omitempty"` +} + +type MonitorRefreshInfo struct { + // Time at which refresh operation completed (milliseconds since 1/1/1970 + // UTC). + EndTimeMs int64 `json:"end_time_ms,omitempty"` + // An optional message to give insight into the current state of the job + // (e.g. FAILURE messages). + Message string `json:"message,omitempty"` + // Unique id of the refresh operation. + RefreshId int64 `json:"refresh_id"` + // Time at which refresh operation was initiated (milliseconds since + // 1/1/1970 UTC). + StartTimeMs int64 `json:"start_time_ms"` + // The current state of the refresh. + State MonitorRefreshInfoState `json:"state"` + // The method by which the refresh was triggered. + Trigger MonitorRefreshInfoTrigger `json:"trigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MonitorRefreshInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MonitorRefreshInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The current state of the refresh. 
+type MonitorRefreshInfoState string + +const MonitorRefreshInfoStateCanceled MonitorRefreshInfoState = `CANCELED` + +const MonitorRefreshInfoStateFailed MonitorRefreshInfoState = `FAILED` + +const MonitorRefreshInfoStatePending MonitorRefreshInfoState = `PENDING` + +const MonitorRefreshInfoStateRunning MonitorRefreshInfoState = `RUNNING` + +const MonitorRefreshInfoStateSuccess MonitorRefreshInfoState = `SUCCESS` + +// String representation for [fmt.Print] +func (f *MonitorRefreshInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorRefreshInfoState) Set(v string) error { + switch v { + case `CANCELED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCESS`: + *f = MonitorRefreshInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "FAILED", "PENDING", "RUNNING", "SUCCESS"`, v) + } +} + +// Type always returns MonitorRefreshInfoState to satisfy [pflag.Value] interface +func (f *MonitorRefreshInfoState) Type() string { + return "MonitorRefreshInfoState" +} + +// The method by which the refresh was triggered. +type MonitorRefreshInfoTrigger string + +const MonitorRefreshInfoTriggerManual MonitorRefreshInfoTrigger = `MANUAL` + +const MonitorRefreshInfoTriggerSchedule MonitorRefreshInfoTrigger = `SCHEDULE` + +// String representation for [fmt.Print] +func (f *MonitorRefreshInfoTrigger) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorRefreshInfoTrigger) Set(v string) error { + switch v { + case `MANUAL`, `SCHEDULE`: + *f = MonitorRefreshInfoTrigger(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANUAL", "SCHEDULE"`, v) + } +} + +// Type always returns MonitorRefreshInfoTrigger to satisfy [pflag.Value] interface +func (f *MonitorRefreshInfoTrigger) Type() string { + return "MonitorRefreshInfoTrigger" +} + +type MonitorRefreshListResponse struct { + // List of refreshes. + Refreshes []MonitorRefreshInfo `json:"refreshes,omitempty"` +} + +type MonitorSnapshot struct { +} + +type MonitorTimeSeries struct { + // Granularities for aggregating data into time windows based on their + // timestamp. Currently the following static granularities are supported: + // {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``"<n> + // week(s)"``, ``"1 month"``, ``"1 year"``}. + Granularities []string `json:"granularities"` + // Column that contains the timestamps of requests. The column must be one + // of the following: - A ``TimestampType`` column - A column whose values + // can be converted to timestamps through the pyspark ``to_timestamp`` + // [function]. + // + // [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html + TimestampCol string `json:"timestamp_col"` +} + +type NamedTableConstraint struct { + // The name of the constraint. + Name string `json:"name"` +} + +// Online Table information. +type OnlineTable struct { + // Full three-part (catalog, schema, table) name of the table. + Name string `json:"name,omitempty"` + // Specification of the online table. + Spec *OnlineTableSpec `json:"spec,omitempty"` + // Online Table data synchronization status + Status *OnlineTableStatus `json:"status,omitempty"` + // Data serving REST API URL for this table + TableServingUrl string `json:"table_serving_url,omitempty"` + // The provisioning state of the online table entity in Unity Catalog.
This + // is distinct from the state of the data synchronization pipeline (i.e. the + // table may be in "ACTIVE" but the pipeline may be in "PROVISIONING" as it + // runs asynchronously). + UnityCatalogProvisioningState ProvisioningInfoState `json:"unity_catalog_provisioning_state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OnlineTable) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OnlineTable) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Specification of an online table. +type OnlineTableSpec struct { + // Whether to create a full-copy pipeline -- a pipeline that stops after + // creating a full copy of the source table upon initialization and does not + // process any change data feeds (CDFs) afterwards. The pipeline can still + // be manually triggered afterwards, but it always performs a full copy of + // the source table and there are no incremental updates. This mode is + // useful for syncing views or tables without CDFs to online tables. Note + // that the full-copy pipeline only supports "triggered" scheduling policy. + PerformFullCopy bool `json:"perform_full_copy,omitempty"` + // ID of the associated pipeline. Generated by the server - cannot be set by + // the caller. + PipelineId string `json:"pipeline_id,omitempty"` + // Primary Key columns to be used for data insert/update in the destination. + PrimaryKeyColumns []string `json:"primary_key_columns,omitempty"` + // Pipeline runs continuously after generating the initial data. + RunContinuously *OnlineTableSpecContinuousSchedulingPolicy `json:"run_continuously,omitempty"` + // Pipeline stops after generating the initial data and can be triggered + // later (manually, through a cron job or through data triggers) + RunTriggered *OnlineTableSpecTriggeredSchedulingPolicy `json:"run_triggered,omitempty"` + // Three-part (catalog, schema, table) name of the source Delta table. + SourceTableFullName string `json:"source_table_full_name,omitempty"` + // Time series key to deduplicate (tie-break) rows with the same primary + // key. + TimeseriesKey string `json:"timeseries_key,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OnlineTableSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OnlineTableSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type OnlineTableSpecContinuousSchedulingPolicy struct { +} + +type OnlineTableSpecTriggeredSchedulingPolicy struct { +} + +// The state of an online table.
+type OnlineTableState string + +const OnlineTableStateOffline OnlineTableState = `OFFLINE` + +const OnlineTableStateOfflineFailed OnlineTableState = `OFFLINE_FAILED` + +const OnlineTableStateOnline OnlineTableState = `ONLINE` + +const OnlineTableStateOnlineContinuousUpdate OnlineTableState = `ONLINE_CONTINUOUS_UPDATE` + +const OnlineTableStateOnlineNoPendingUpdate OnlineTableState = `ONLINE_NO_PENDING_UPDATE` + +const OnlineTableStateOnlinePipelineFailed OnlineTableState = `ONLINE_PIPELINE_FAILED` + +const OnlineTableStateOnlineTriggeredUpdate OnlineTableState = `ONLINE_TRIGGERED_UPDATE` + +const OnlineTableStateOnlineUpdatingPipelineResources OnlineTableState = `ONLINE_UPDATING_PIPELINE_RESOURCES` + +const OnlineTableStateProvisioning OnlineTableState = `PROVISIONING` + +const OnlineTableStateProvisioningInitialSnapshot OnlineTableState = `PROVISIONING_INITIAL_SNAPSHOT` + +const OnlineTableStateProvisioningPipelineResources OnlineTableState = `PROVISIONING_PIPELINE_RESOURCES` + +// String representation for [fmt.Print] +func (f *OnlineTableState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *OnlineTableState) Set(v string) error { + switch v { + case `OFFLINE`, `OFFLINE_FAILED`, `ONLINE`, `ONLINE_CONTINUOUS_UPDATE`, `ONLINE_NO_PENDING_UPDATE`, `ONLINE_PIPELINE_FAILED`, `ONLINE_TRIGGERED_UPDATE`, `ONLINE_UPDATING_PIPELINE_RESOURCES`, `PROVISIONING`, `PROVISIONING_INITIAL_SNAPSHOT`, `PROVISIONING_PIPELINE_RESOURCES`: + *f = OnlineTableState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "OFFLINE_FAILED", "ONLINE", "ONLINE_CONTINUOUS_UPDATE", "ONLINE_NO_PENDING_UPDATE", "ONLINE_PIPELINE_FAILED", "ONLINE_TRIGGERED_UPDATE", "ONLINE_UPDATING_PIPELINE_RESOURCES", "PROVISIONING", "PROVISIONING_INITIAL_SNAPSHOT", "PROVISIONING_PIPELINE_RESOURCES"`, v) + } +} + +// Type always returns OnlineTableState to satisfy [pflag.Value] interface +func (f *OnlineTableState) Type() string { + return "OnlineTableState" +} + +// Status of an online table. +type OnlineTableStatus struct { + // Detailed status of an online table. Shown if the online table is in the + // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. + ContinuousUpdateStatus *ContinuousUpdateStatus `json:"continuous_update_status,omitempty"` + // The state of the online table. + DetailedState OnlineTableState `json:"detailed_state,omitempty"` + // Detailed status of an online table. Shown if the online table is in the + // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. + FailedStatus *FailedStatus `json:"failed_status,omitempty"` + // A text description of the current state of the online table. + Message string `json:"message,omitempty"` + // Detailed status of an online table. Shown if the online table is in the + // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT + // state. + ProvisioningStatus *ProvisioningStatus `json:"provisioning_status,omitempty"` + // Detailed status of an online table. Shown if the online table is in the + // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. + TriggeredUpdateStatus *TriggeredUpdateStatus `json:"triggered_update_status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OnlineTableStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OnlineTableStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionsChange struct { + // The set of privileges to add. 
+ Add []Privilege `json:"add,omitempty"` + // The principal whose privileges we are changing. + Principal string `json:"principal,omitempty"` + // The set of privileges to remove. + Remove []Privilege `json:"remove,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PermissionsChange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionsChange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionsList struct { + // The privileges assigned to each principal + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` +} + +// Progress information of the Online Table data synchronization pipeline. +type PipelineProgress struct { + // The estimated time remaining to complete this update in seconds. + EstimatedCompletionTimeSeconds float64 `json:"estimated_completion_time_seconds,omitempty"` + // The source table Delta version that was last processed by the pipeline. + // The pipeline may not have completely processed this version yet. + LatestVersionCurrentlyProcessing int64 `json:"latest_version_currently_processing,omitempty"` + // The completion ratio of this update. This is a number between 0 and 1. + SyncProgressCompletion float64 `json:"sync_progress_completion,omitempty"` + // The number of rows that have been synced in this update. + SyncedRowCount int64 `json:"synced_row_count,omitempty"` + // The total number of rows that need to be synced in this update. This + // number may be an estimate. + TotalRowCount int64 `json:"total_row_count,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineProgress) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineProgress) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PrimaryKeyConstraint struct { + // Column names for this constraint. + ChildColumns []string `json:"child_columns"` + // The name of the constraint. 
+ Name string `json:"name"` +} + +type Privilege string + +const PrivilegeAccess Privilege = `ACCESS` + +const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` + +const PrivilegeApplyTag Privilege = `APPLY_TAG` + +const PrivilegeCreate Privilege = `CREATE` + +const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` + +const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` + +const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` + +const PrivilegeCreateExternalTable Privilege = `CREATE_EXTERNAL_TABLE` + +const PrivilegeCreateExternalVolume Privilege = `CREATE_EXTERNAL_VOLUME` + +const PrivilegeCreateForeignCatalog Privilege = `CREATE_FOREIGN_CATALOG` + +const PrivilegeCreateForeignSecurable Privilege = `CREATE_FOREIGN_SECURABLE` + +const PrivilegeCreateFunction Privilege = `CREATE_FUNCTION` + +const PrivilegeCreateManagedStorage Privilege = `CREATE_MANAGED_STORAGE` + +const PrivilegeCreateMaterializedView Privilege = `CREATE_MATERIALIZED_VIEW` + +const PrivilegeCreateModel Privilege = `CREATE_MODEL` + +const PrivilegeCreateProvider Privilege = `CREATE_PROVIDER` + +const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` + +const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` + +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + +const PrivilegeCreateShare Privilege = `CREATE_SHARE` + +const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` + +const PrivilegeCreateTable Privilege = `CREATE_TABLE` + +const PrivilegeCreateView Privilege = `CREATE_VIEW` + +const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` + +const PrivilegeExecute Privilege = `EXECUTE` + +const PrivilegeManage Privilege = `MANAGE` + +const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` + +const PrivilegeModify Privilege = `MODIFY` + +const PrivilegeReadFiles Privilege = `READ_FILES` + +const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` + +const PrivilegeReadVolume Privilege = `READ_VOLUME` + +const PrivilegeRefresh Privilege = `REFRESH` + +const PrivilegeSelect Privilege = `SELECT` + +const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` + +const PrivilegeUsage Privilege = `USAGE` + +const PrivilegeUseCatalog Privilege = `USE_CATALOG` + +const PrivilegeUseConnection Privilege = `USE_CONNECTION` + +const PrivilegeUseMarketplaceAssets Privilege = `USE_MARKETPLACE_ASSETS` + +const PrivilegeUseProvider Privilege = `USE_PROVIDER` + +const PrivilegeUseRecipient Privilege = `USE_RECIPIENT` + +const PrivilegeUseSchema Privilege = `USE_SCHEMA` + +const PrivilegeUseShare Privilege = `USE_SHARE` + +const PrivilegeWriteFiles Privilege = `WRITE_FILES` + +const PrivilegeWritePrivateFiles Privilege = `WRITE_PRIVATE_FILES` + +const PrivilegeWriteVolume Privilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *Privilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Privilege) Set(v string) error { + switch v { + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, 
`MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + *f = Privilege(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + } +} + +// Type always returns Privilege to satisfy [pflag.Value] interface +func (f *Privilege) Type() string { + return "Privilege" +} + +type PrivilegeAssignment struct { + // The principal (user email address or group name). + Principal string `json:"principal,omitempty"` + // The privileges assigned to the principal. + Privileges []Privilege `json:"privileges,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// An object containing map of key-value properties attached to the connection. +type PropertiesKvPairs map[string]string + +// Status of an asynchronously provisioned resource. +type ProvisioningInfo struct { + State ProvisioningInfoState `json:"state,omitempty"` +} + +type ProvisioningInfoState string + +const ProvisioningInfoStateActive ProvisioningInfoState = `ACTIVE` + +const ProvisioningInfoStateDegraded ProvisioningInfoState = `DEGRADED` + +const ProvisioningInfoStateDeleting ProvisioningInfoState = `DELETING` + +const ProvisioningInfoStateFailed ProvisioningInfoState = `FAILED` + +const ProvisioningInfoStateProvisioning ProvisioningInfoState = `PROVISIONING` + +const ProvisioningInfoStateUpdating ProvisioningInfoState = `UPDATING` + +// String representation for [fmt.Print] +func (f *ProvisioningInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ProvisioningInfoState) Set(v string) error { + switch v { + case `ACTIVE`, `DEGRADED`, `DELETING`, `FAILED`, `PROVISIONING`, `UPDATING`: + *f = ProvisioningInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DEGRADED", "DELETING", "FAILED", "PROVISIONING", "UPDATING"`, v) + } +} + +// Type always returns ProvisioningInfoState to satisfy [pflag.Value] interface +func (f *ProvisioningInfoState) Type() string { + return "ProvisioningInfoState" +} + +// Detailed status of an online table. Shown if the online table is in the +// PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state. +type ProvisioningStatus struct { + // Details about initial data synchronization. 
Only populated when in the
+ // PROVISIONING_INITIAL_SNAPSHOT state.
+ InitialPipelineSyncProgress *PipelineProgress `json:"initial_pipeline_sync_progress,omitempty"`
+}
+
+type QuotaInfo struct {
+ // The timestamp that indicates when the quota count was last updated.
+ LastRefreshedAt int64 `json:"last_refreshed_at,omitempty"`
+ // Name of the parent resource. Returns metastore ID if the parent is a
+ // metastore.
+ ParentFullName string `json:"parent_full_name,omitempty"`
+ // The quota parent securable type.
+ ParentSecurableType SecurableType `json:"parent_securable_type,omitempty"`
+ // The current usage of the resource quota.
+ QuotaCount int `json:"quota_count,omitempty"`
+ // The current limit of the resource quota.
+ QuotaLimit int `json:"quota_limit,omitempty"`
+ // The name of the quota.
+ QuotaName string `json:"quota_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *QuotaInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s QuotaInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// R2 temporary credentials for API authentication. Read more at
+// https://developers.cloudflare.com/r2/api/s3/tokens/.
+type R2Credentials struct {
+ // The access key ID that identifies the temporary credentials.
+ AccessKeyId string `json:"access_key_id,omitempty"`
+ // The secret access key associated with the access key.
+ SecretAccessKey string `json:"secret_access_key,omitempty"`
+ // The generated JWT that users must pass to use the temporary credentials.
+ SessionToken string `json:"session_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *R2Credentials) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s R2Credentials) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a Volume
+type ReadVolumeRequest struct {
+ // Whether to include volumes in the response for which the principal can
+ // only access selective metadata.
+ IncludeBrowse bool `json:"-" url:"include_browse,omitempty"`
+ // The three-level (fully qualified) name of the volume
+ Name string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ReadVolumeRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ReadVolumeRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RegenerateDashboardRequest struct {
+ // Full name of the table.
+ TableName string `json:"-" url:"-"`
+ // Optional argument to specify the warehouse for dashboard regeneration. If
+ // not specified, the first running warehouse will be used.
+ WarehouseId string `json:"warehouse_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RegenerateDashboardRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RegenerateDashboardRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RegenerateDashboardResponse struct {
+ // ID of the regenerated monitoring dashboard.
+ DashboardId string `json:"dashboard_id,omitempty"`
+ // The directory where the regenerated dashboard is stored.
+ ParentFolder string `json:"parent_folder,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RegenerateDashboardResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RegenerateDashboardResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
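+// quotaUtilization is an editor's sketch, not generated code: it shows how a
+// caller might turn the QuotaInfo counters above into a utilization fraction
+// in [0, 1]. A zero limit is treated as "no limit known".
+func quotaUtilization(q QuotaInfo) float64 {
+ if q.QuotaLimit == 0 {
+ return 0
+ }
+ return float64(q.QuotaCount) / float64(q.QuotaLimit)
+}
+
+// Registered model alias.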
+type RegisteredModelAlias struct { + // Name of the alias, e.g. 'champion' or 'latest_stable' + AliasName string `json:"alias_name,omitempty"` + // Integer version number of the model version to which this alias points. + VersionNum int `json:"version_num,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelAlias) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelAlias) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegisteredModelInfo struct { + // List of aliases associated with the registered model + Aliases []RegisteredModelAlias `json:"aliases,omitempty"` + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly bool `json:"browse_only,omitempty"` + // The name of the catalog where the schema and the registered model reside + CatalogName string `json:"catalog_name,omitempty"` + // The comment attached to the registered model + Comment string `json:"comment,omitempty"` + // Creation timestamp of the registered model in milliseconds since the Unix + // epoch + CreatedAt int64 `json:"created_at,omitempty"` + // The identifier of the user who created the registered model + CreatedBy string `json:"created_by,omitempty"` + // The three-level (fully qualified) name of the registered model + FullName string `json:"full_name,omitempty"` + // The unique identifier of the metastore + MetastoreId string `json:"metastore_id,omitempty"` + // The name of the registered model + Name string `json:"name,omitempty"` + // The identifier of the user who owns the registered model + Owner string `json:"owner,omitempty"` + // The name of the schema where the registered model resides + SchemaName string `json:"schema_name,omitempty"` + // The storage location on the cloud under which model version data files + // are stored + StorageLocation string `json:"storage_location,omitempty"` + // Last-update timestamp of the registered model in milliseconds since the + // Unix epoch + UpdatedAt int64 `json:"updated_at,omitempty"` + // The identifier of the user who updated the registered model last time + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Queue a metric refresh for a monitor +type RunRefreshRequest struct { + // Full name of the table. + TableName string `json:"-" url:"-"` +} + +type SchemaInfo struct { + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly bool `json:"browse_only,omitempty"` + // Name of parent catalog. + CatalogName string `json:"catalog_name,omitempty"` + // The type of the parent catalog. + CatalogType string `json:"catalog_type,omitempty"` + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Time at which this schema was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of schema creator. 
+ CreatedBy string `json:"created_by,omitempty"`
+
+ EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"`
+ // Whether predictive optimization should be enabled for this object and
+ // objects under it.
+ EnablePredictiveOptimization EnablePredictiveOptimization `json:"enable_predictive_optimization,omitempty"`
+ // Full name of schema, in form of __catalog_name__.__schema_name__.
+ FullName string `json:"full_name,omitempty"`
+ // Unique identifier of parent metastore.
+ MetastoreId string `json:"metastore_id,omitempty"`
+ // Name of schema, relative to parent catalog.
+ Name string `json:"name,omitempty"`
+ // Username of current owner of schema.
+ Owner string `json:"owner,omitempty"`
+ // A map of key-value properties attached to the securable.
+ Properties map[string]string `json:"properties,omitempty"`
+ // The unique identifier of the schema.
+ SchemaId string `json:"schema_id,omitempty"`
+ // Storage location for managed tables within schema.
+ StorageLocation string `json:"storage_location,omitempty"`
+ // Storage root URL for managed tables within schema.
+ StorageRoot string `json:"storage_root,omitempty"`
+ // Time at which this schema was last modified, in epoch milliseconds.
+ UpdatedAt int64 `json:"updated_at,omitempty"`
+ // Username of user who last modified schema.
+ UpdatedBy string `json:"updated_by,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SchemaInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s SchemaInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// A map of key-value properties attached to the securable.
+type SecurableOptionsMap map[string]string
+
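+// Editor's note, not generated code: every optional field on these models is
+// tagged omitempty, so a Go zero value is normally dropped from the JSON
+// payload. Listing the field name in ForceSendFields makes the marshal
+// helpers emit it anyway, which is how a caller sends an explicitly empty
+// value. A hedged sketch; the JSON shown is the expected shape, not captured
+// output:
+//
+//	s := SchemaInfo{Name: "events"}
+//	a, _ := s.MarshalJSON() // {"name":"events"} (zero-valued Comment omitted)
+//	s.ForceSendFields = []string{"Comment"}
+//	b, _ := s.MarshalJSON() // {"name":"events","comment":""}
+
+// A map of key-value properties attached to the securable.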
+type SecurablePropertiesMap map[string]string + +// The type of Unity Catalog securable +type SecurableType string + +const SecurableTypeCatalog SecurableType = `CATALOG` + +const SecurableTypeCleanRoom SecurableType = `CLEAN_ROOM` + +const SecurableTypeConnection SecurableType = `CONNECTION` + +const SecurableTypeCredential SecurableType = `CREDENTIAL` + +const SecurableTypeExternalLocation SecurableType = `EXTERNAL_LOCATION` + +const SecurableTypeFunction SecurableType = `FUNCTION` + +const SecurableTypeMetastore SecurableType = `METASTORE` + +const SecurableTypePipeline SecurableType = `PIPELINE` + +const SecurableTypeProvider SecurableType = `PROVIDER` + +const SecurableTypeRecipient SecurableType = `RECIPIENT` + +const SecurableTypeSchema SecurableType = `SCHEMA` + +const SecurableTypeShare SecurableType = `SHARE` + +const SecurableTypeStorageCredential SecurableType = `STORAGE_CREDENTIAL` + +const SecurableTypeTable SecurableType = `TABLE` + +const SecurableTypeVolume SecurableType = `VOLUME` + +// String representation for [fmt.Print] +func (f *SecurableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SecurableType) Set(v string) error { + switch v { + case `CATALOG`, `CLEAN_ROOM`, `CONNECTION`, `CREDENTIAL`, `EXTERNAL_LOCATION`, `FUNCTION`, `METASTORE`, `PIPELINE`, `PROVIDER`, `RECIPIENT`, `SCHEMA`, `SHARE`, `STORAGE_CREDENTIAL`, `TABLE`, `VOLUME`: + *f = SecurableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CATALOG", "CLEAN_ROOM", "CONNECTION", "CREDENTIAL", "EXTERNAL_LOCATION", "FUNCTION", "METASTORE", "PIPELINE", "PROVIDER", "RECIPIENT", "SCHEMA", "SHARE", "STORAGE_CREDENTIAL", "TABLE", "VOLUME"`, v) + } +} + +// Type always returns SecurableType to satisfy [pflag.Value] interface +func (f *SecurableType) Type() string { + return "SecurableType" +} + +type SetArtifactAllowlist struct { + // A list of allowed artifact match patterns. + ArtifactMatchers []ArtifactMatcher `json:"artifact_matchers"` + // The artifact type of the allowlist. + ArtifactType ArtifactType `json:"-" url:"-"` +} + +type SetRegisteredModelAliasRequest struct { + // The name of the alias + Alias string `json:"alias" url:"-"` + // Full name of the registered model + FullName string `json:"full_name" url:"-"` + // The version number of the model version to which the alias points + VersionNum int `json:"version_num"` +} + +// Server-Side Encryption properties for clients communicating with AWS s3. +type SseEncryptionDetails struct { + // The type of key encryption to use (affects headers from s3 client). + Algorithm SseEncryptionDetailsAlgorithm `json:"algorithm,omitempty"` + // When algorithm is **AWS_SSE_KMS** this field specifies the ARN of the SSE + // key to use. + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SseEncryptionDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SseEncryptionDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of key encryption to use (affects headers from s3 client). 
+type SseEncryptionDetailsAlgorithm string + +const SseEncryptionDetailsAlgorithmAwsSseKms SseEncryptionDetailsAlgorithm = `AWS_SSE_KMS` + +const SseEncryptionDetailsAlgorithmAwsSseS3 SseEncryptionDetailsAlgorithm = `AWS_SSE_S3` + +// String representation for [fmt.Print] +func (f *SseEncryptionDetailsAlgorithm) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SseEncryptionDetailsAlgorithm) Set(v string) error { + switch v { + case `AWS_SSE_KMS`, `AWS_SSE_S3`: + *f = SseEncryptionDetailsAlgorithm(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AWS_SSE_KMS", "AWS_SSE_S3"`, v) + } +} + +// Type always returns SseEncryptionDetailsAlgorithm to satisfy [pflag.Value] interface +func (f *SseEncryptionDetailsAlgorithm) Type() string { + return "SseEncryptionDetailsAlgorithm" +} + +type StorageCredentialInfo struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleResponse `json:"aws_iam_role,omitempty"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityResponse `json:"azure_managed_identity,omitempty"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `json:"cloudflare_api_token,omitempty"` + // Comment associated with the credential. + Comment string `json:"comment,omitempty"` + // Time at which this Credential was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of credential creator. + CreatedBy string `json:"created_by,omitempty"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountResponse `json:"databricks_gcp_service_account,omitempty"` + // The full name of the credential. + FullName string `json:"full_name,omitempty"` + // The unique identifier of the credential. + Id string `json:"id,omitempty"` + + IsolationMode IsolationMode `json:"isolation_mode,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // The credential name. The name must be unique within the metastore. + Name string `json:"name,omitempty"` + // Username of current owner of credential. + Owner string `json:"owner,omitempty"` + // Whether the storage credential is only usable for read operations. + ReadOnly bool `json:"read_only,omitempty"` + // Time at which this credential was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified the credential. + UpdatedBy string `json:"updated_by,omitempty"` + // Whether this credential is the current metastore's root storage + // credential. + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StorageCredentialInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StorageCredentialInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SystemSchemaInfo struct { + // Name of the system schema. + Schema string `json:"schema,omitempty"` + // The current state of enablement for the system schema. An empty string + // means the system schema is available and ready for opt-in. 
+ State SystemSchemaInfoState `json:"state,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SystemSchemaInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s SystemSchemaInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The current state of enablement for the system schema. An empty string means
+// the system schema is available and ready for opt-in.
+type SystemSchemaInfoState string
+
+const SystemSchemaInfoStateAvailable SystemSchemaInfoState = `AVAILABLE`
+
+const SystemSchemaInfoStateDisableInitialized SystemSchemaInfoState = `DISABLE_INITIALIZED`
+
+const SystemSchemaInfoStateEnableCompleted SystemSchemaInfoState = `ENABLE_COMPLETED`
+
+const SystemSchemaInfoStateEnableInitialized SystemSchemaInfoState = `ENABLE_INITIALIZED`
+
+const SystemSchemaInfoStateUnavailable SystemSchemaInfoState = `UNAVAILABLE`
+
+// String representation for [fmt.Print]
+func (f *SystemSchemaInfoState) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *SystemSchemaInfoState) Set(v string) error {
+ switch v {
+ case `AVAILABLE`, `DISABLE_INITIALIZED`, `ENABLE_COMPLETED`, `ENABLE_INITIALIZED`, `UNAVAILABLE`:
+ *f = SystemSchemaInfoState(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "AVAILABLE", "DISABLE_INITIALIZED", "ENABLE_COMPLETED", "ENABLE_INITIALIZED", "UNAVAILABLE"`, v)
+ }
+}
+
+// Type always returns SystemSchemaInfoState to satisfy [pflag.Value] interface
+func (f *SystemSchemaInfoState) Type() string {
+ return "SystemSchemaInfoState"
+}
+
+// A table constraint, as defined by *one* of the following fields being set:
+// __primary_key_constraint__, __foreign_key_constraint__,
+// __named_table_constraint__.
+type TableConstraint struct {
+ ForeignKeyConstraint *ForeignKeyConstraint `json:"foreign_key_constraint,omitempty"`
+
+ NamedTableConstraint *NamedTableConstraint `json:"named_table_constraint,omitempty"`
+
+ PrimaryKeyConstraint *PrimaryKeyConstraint `json:"primary_key_constraint,omitempty"`
+}
+
+// A table that is dependent on a SQL object.
+type TableDependency struct {
+ // Full name of the dependent table, in the form of
+ // __catalog_name__.__schema_name__.__table_name__.
+ TableFullName string `json:"table_full_name"`
+}
+
+type TableExistsResponse struct {
+ // Whether the table exists or not.
+ TableExists bool `json:"table_exists,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *TableExistsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s TableExistsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type TableInfo struct {
+ // The AWS access point to use when accessing S3 for this external location.
+ AccessPoint string `json:"access_point,omitempty"`
+ // Indicates whether the principal is limited to retrieving metadata for the
+ // associated object through the BROWSE privilege when include_browse is
+ // enabled in the request.
+ BrowseOnly bool `json:"browse_only,omitempty"`
+ // Name of parent catalog.
+ CatalogName string `json:"catalog_name,omitempty"`
+ // The array of __ColumnInfo__ definitions of the table's columns.
+ Columns []ColumnInfo `json:"columns,omitempty"`
+ // User-provided free-form text description.
+ Comment string `json:"comment,omitempty"`
+ // Time at which this table was created, in epoch milliseconds.
+ CreatedAt int64 `json:"created_at,omitempty"`
+ // Username of table creator.
+ CreatedBy string `json:"created_by,omitempty"` + // Unique ID of the Data Access Configuration to use with the table data. + DataAccessConfigurationId string `json:"data_access_configuration_id,omitempty"` + // Data source format + DataSourceFormat DataSourceFormat `json:"data_source_format,omitempty"` + // Time at which this table was deleted, in epoch milliseconds. Field is + // omitted if table is not deleted. + DeletedAt int64 `json:"deleted_at,omitempty"` + // Information pertaining to current state of the delta table. + DeltaRuntimePropertiesKvpairs *DeltaRuntimePropertiesKvPairs `json:"delta_runtime_properties_kvpairs,omitempty"` + + EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `json:"enable_predictive_optimization,omitempty"` + // Encryption options that apply to clients connecting to cloud storage. + EncryptionDetails *EncryptionDetails `json:"encryption_details,omitempty"` + // Full name of table, in form of + // __catalog_name__.__schema_name__.__table_name__ + FullName string `json:"full_name,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + // Name of table, relative to parent schema. + Name string `json:"name,omitempty"` + // Username of current owner of table. + Owner string `json:"owner,omitempty"` + // The pipeline ID of the table. Applicable for tables created by pipelines + // (Materialized View, Streaming Table, etc.). + PipelineId string `json:"pipeline_id,omitempty"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + + RowFilter *TableRowFilter `json:"row_filter,omitempty"` + // Name of parent schema relative to its parent catalog. + SchemaName string `json:"schema_name,omitempty"` + // List of schemes whose objects can be referenced without qualification. + SqlPath string `json:"sql_path,omitempty"` + // Name of the storage credential, when a storage credential is configured + // for use with this table. + StorageCredentialName string `json:"storage_credential_name,omitempty"` + // Storage root URL for table (for **MANAGED**, **EXTERNAL** tables) + StorageLocation string `json:"storage_location,omitempty"` + // List of table constraints. Note: this field is not set in the output of + // the __listTables__ API. + TableConstraints []TableConstraint `json:"table_constraints,omitempty"` + // The unique identifier of the table. + TableId string `json:"table_id,omitempty"` + + TableType TableType `json:"table_type,omitempty"` + // Time at which this table was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified the table. + UpdatedBy string `json:"updated_by,omitempty"` + // View definition SQL (when __table_type__ is **VIEW**, + // **MATERIALIZED_VIEW**, or **STREAMING_TABLE**) + ViewDefinition string `json:"view_definition,omitempty"` + // View dependencies (when table_type == **VIEW** or **MATERIALIZED_VIEW**, + // **STREAMING_TABLE**) - when DependencyList is None, the dependency is not + // provided; - when DependencyList is an empty list, the dependency is + // provided but is empty; - when DependencyList is not an empty list, + // dependencies are provided and recorded. 
+ ViewDependencies *DependencyList `json:"view_dependencies,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TableOperation string + +const TableOperationRead TableOperation = `READ` + +const TableOperationReadWrite TableOperation = `READ_WRITE` + +// String representation for [fmt.Print] +func (f *TableOperation) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableOperation) Set(v string) error { + switch v { + case `READ`, `READ_WRITE`: + *f = TableOperation(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "READ", "READ_WRITE"`, v) + } +} + +// Type always returns TableOperation to satisfy [pflag.Value] interface +func (f *TableOperation) Type() string { + return "TableOperation" +} + +type TableRowFilter struct { + // The full name of the row filter SQL UDF. + FunctionName string `json:"function_name"` + // The list of table columns to be passed as input to the row filter + // function. The column types should match the types of the filter function + // arguments. + InputColumnNames []string `json:"input_column_names"` +} + +type TableSummary struct { + // The full name of the table. + FullName string `json:"full_name,omitempty"` + + TableType TableType `json:"table_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableSummary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableSummary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TableType string + +const TableTypeExternal TableType = `EXTERNAL` + +const TableTypeExternalShallowClone TableType = `EXTERNAL_SHALLOW_CLONE` + +const TableTypeForeign TableType = `FOREIGN` + +const TableTypeManaged TableType = `MANAGED` + +const TableTypeManagedShallowClone TableType = `MANAGED_SHALLOW_CLONE` + +const TableTypeMaterializedView TableType = `MATERIALIZED_VIEW` + +const TableTypeStreamingTable TableType = `STREAMING_TABLE` + +const TableTypeView TableType = `VIEW` + +// String representation for [fmt.Print] +func (f *TableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableType) Set(v string) error { + switch v { + case `EXTERNAL`, `EXTERNAL_SHALLOW_CLONE`, `FOREIGN`, `MANAGED`, `MANAGED_SHALLOW_CLONE`, `MATERIALIZED_VIEW`, `STREAMING_TABLE`, `VIEW`: + *f = TableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "EXTERNAL_SHALLOW_CLONE", "FOREIGN", "MANAGED", "MANAGED_SHALLOW_CLONE", "MATERIALIZED_VIEW", "STREAMING_TABLE", "VIEW"`, v) + } +} + +// Type always returns TableType to satisfy [pflag.Value] interface +func (f *TableType) Type() string { + return "TableType" +} + +type TemporaryCredentials struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials *AwsCredentials `json:"aws_temp_credentials,omitempty"` + // Azure Active Directory token, essentially the Oauth token for Azure + // Service Principal or Managed Identity. 
Read more at + // https://learn.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token + AzureAad *AzureActiveDirectoryToken `json:"azure_aad,omitempty"` + // Server time when the credential will expire, in epoch milliseconds. The + // API client is advised to cache the credential given this expiration time. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // GCP temporary credentials for API authentication. Read more at + // https://developers.google.com/identity/protocols/oauth2/service-account + GcpOauthToken *GcpOauthToken `json:"gcp_oauth_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TemporaryCredentials) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TemporaryCredentials) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Detailed status of an online table. Shown if the online table is in the +// ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. +type TriggeredUpdateStatus struct { + // The last source table Delta version that was synced to the online table. + // Note that this Delta version may not be completely synced to the online + // table yet. + LastProcessedCommitVersion int64 `json:"last_processed_commit_version,omitempty"` + // The timestamp of the last time any data was synchronized from the source + // table to the online table. + Timestamp string `json:"timestamp,omitempty"` + // Progress of the active data synchronization pipeline. + TriggeredUpdateProgress *PipelineProgress `json:"triggered_update_progress,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TriggeredUpdateStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TriggeredUpdateStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete an assignment +type UnassignRequest struct { + // Query for the ID of the metastore to delete. + MetastoreId string `json:"-" url:"metastore_id"` + // A workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +type UnassignResponse struct { +} + +type UpdateAssignmentResponse struct { +} + +type UpdateBindingsSecurableType string + +const UpdateBindingsSecurableTypeCatalog UpdateBindingsSecurableType = `catalog` + +const UpdateBindingsSecurableTypeCredential UpdateBindingsSecurableType = `credential` + +const UpdateBindingsSecurableTypeExternalLocation UpdateBindingsSecurableType = `external_location` + +const UpdateBindingsSecurableTypeStorageCredential UpdateBindingsSecurableType = `storage_credential` + +// String representation for [fmt.Print] +func (f *UpdateBindingsSecurableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateBindingsSecurableType) Set(v string) error { + switch v { + case `catalog`, `credential`, `external_location`, `storage_credential`: + *f = UpdateBindingsSecurableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "catalog", "credential", "external_location", "storage_credential"`, v) + } +} + +// Type always returns UpdateBindingsSecurableType to satisfy [pflag.Value] interface +func (f *UpdateBindingsSecurableType) Type() string { + return "UpdateBindingsSecurableType" +} + +type UpdateCatalog struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Whether predictive optimization should be enabled for this object and + // objects under it. 
+ EnablePredictiveOptimization EnablePredictiveOptimization `json:"enable_predictive_optimization,omitempty"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode CatalogIsolationMode `json:"isolation_mode,omitempty"` + // The name of the catalog. + Name string `json:"-" url:"-"` + // New name for the catalog. + NewName string `json:"new_name,omitempty"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options,omitempty"` + // Username of current owner of catalog. + Owner string `json:"owner,omitempty"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateCatalog) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateCatalog) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateConnection struct { + // Name of the connection. + Name string `json:"-" url:"-"` + // New name for the connection. + NewName string `json:"new_name,omitempty"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options"` + // Username of current owner of the connection. + Owner string `json:"owner,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateConnection) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateConnection) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateCredentialRequest struct { + // The AWS IAM role configuration + AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"` + // The Azure service principal configuration. Only applicable when purpose + // is **STORAGE**. + AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"` + // Comment associated with the credential. + Comment string `json:"comment,omitempty"` + // GCP long-lived credential. Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` + // Force an update even if there are dependent services (when purpose is + // **SERVICE**) or dependent external locations and external tables (when + // purpose is **STORAGE**). + Force bool `json:"force,omitempty"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode IsolationMode `json:"isolation_mode,omitempty"` + // Name of the credential. + NameArg string `json:"-" url:"-"` + // New name of credential. + NewName string `json:"new_name,omitempty"` + // Username of current owner of credential. + Owner string `json:"owner,omitempty"` + // Whether the credential is usable only for read operations. Only + // applicable when purpose is **STORAGE**. + ReadOnly bool `json:"read_only,omitempty"` + // Supply true to this argument to skip validation of the updated + // credential. 
+ SkipValidation bool `json:"skip_validation,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateCredentialRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateCredentialRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpdateExternalLocation struct {
+ // The AWS access point to use when accessing S3 for this external location.
+ AccessPoint string `json:"access_point,omitempty"`
+ // User-provided free-form text description.
+ Comment string `json:"comment,omitempty"`
+ // Name of the storage credential used with this location.
+ CredentialName string `json:"credential_name,omitempty"`
+ // Encryption options that apply to clients connecting to cloud storage.
+ EncryptionDetails *EncryptionDetails `json:"encryption_details,omitempty"`
+ // Indicates whether fallback mode is enabled for this external location.
+ // When fallback mode is enabled, the access to the location falls back to
+ // cluster credentials if UC credentials are not sufficient.
+ Fallback bool `json:"fallback,omitempty"`
+ // Force update even if changing the URL invalidates dependent external
+ // tables or mounts.
+ Force bool `json:"force,omitempty"`
+
+ IsolationMode IsolationMode `json:"isolation_mode,omitempty"`
+ // Name of the external location.
+ Name string `json:"-" url:"-"`
+ // New name for the external location.
+ NewName string `json:"new_name,omitempty"`
+ // The owner of the external location.
+ Owner string `json:"owner,omitempty"`
+ // Indicates whether the external location is read-only.
+ ReadOnly bool `json:"read_only,omitempty"`
+ // Skips validation of the storage credential associated with the external
+ // location.
+ SkipValidation bool `json:"skip_validation,omitempty"`
+ // Path URL of the external location.
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateExternalLocation) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateExternalLocation) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpdateFunction struct {
+ // The fully-qualified name of the function (of the form
+ // __catalog_name__.__schema_name__.__function_name__).
+ Name string `json:"-" url:"-"`
+ // Username of current owner of function.
+ Owner string `json:"owner,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateFunction) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateFunction) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpdateMetastore struct {
+ // The organization name of a Delta Sharing entity, to be used in
+ // Databricks-to-Databricks Delta Sharing as the official name.
+ DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"`
+ // The lifetime of the Delta Sharing recipient token in seconds.
+ DeltaSharingRecipientTokenLifetimeInSeconds int64 `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"`
+ // The scope of Delta Sharing enabled for the metastore.
+ DeltaSharingScope UpdateMetastoreDeltaSharingScope `json:"delta_sharing_scope,omitempty"`
+ // Unique ID of the metastore.
+ Id string `json:"-" url:"-"`
+ // New name for the metastore.
+ NewName string `json:"new_name,omitempty"`
+ // The owner of the metastore.
+ Owner string `json:"owner,omitempty"`
+ // Privilege model version of the metastore, of the form `major.minor`
+ // (e.g., `1.0`).
+ PrivilegeModelVersion string `json:"privilege_model_version,omitempty"`
+ // UUID of storage credential to access the metastore storage_root.
+ StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateMetastore) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateMetastore) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpdateMetastoreAssignment struct {
+ // The name of the default catalog in the metastore. This field is
+ // deprecated. Please use "Default Namespace API" to configure the default
+ // catalog for a Databricks workspace.
+ DefaultCatalogName string `json:"default_catalog_name,omitempty"`
+ // The unique ID of the metastore.
+ MetastoreId string `json:"metastore_id,omitempty"`
+ // A workspace ID.
+ WorkspaceId int64 `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateMetastoreAssignment) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateMetastoreAssignment) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The scope of Delta Sharing enabled for the metastore.
+type UpdateMetastoreDeltaSharingScope string
+
+const UpdateMetastoreDeltaSharingScopeInternal UpdateMetastoreDeltaSharingScope = `INTERNAL`
+
+const UpdateMetastoreDeltaSharingScopeInternalAndExternal UpdateMetastoreDeltaSharingScope = `INTERNAL_AND_EXTERNAL`
+
+// String representation for [fmt.Print]
+func (f *UpdateMetastoreDeltaSharingScope) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *UpdateMetastoreDeltaSharingScope) Set(v string) error {
+ switch v {
+ case `INTERNAL`, `INTERNAL_AND_EXTERNAL`:
+ *f = UpdateMetastoreDeltaSharingScope(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v)
+ }
+}
+
+// Type always returns UpdateMetastoreDeltaSharingScope to satisfy [pflag.Value] interface
+func (f *UpdateMetastoreDeltaSharingScope) Type() string {
+ return "UpdateMetastoreDeltaSharingScope"
+}
+
+type UpdateModelVersionRequest struct {
+ // The comment attached to the model version
+ Comment string `json:"comment,omitempty"`
+ // The three-level (fully qualified) name of the model version
+ FullName string `json:"-" url:"-"`
+ // The integer version number of the model version
+ Version int `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateModelVersionRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateModelVersionRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
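+// Editor's note, not generated code: the String/Set/Type methods on enum
+// types such as UpdateMetastoreDeltaSharingScope exist so the types satisfy
+// pflag.Value and can be bound directly to command-line flags. A hedged
+// sketch, assuming github.com/spf13/pflag and an invented flag name:
+//
+//	scope := UpdateMetastoreDeltaSharingScopeInternal
+//	fs := pflag.NewFlagSet("update-metastore", pflag.ContinueOnError)
+//	fs.Var(&scope, "delta-sharing-scope", "INTERNAL or INTERNAL_AND_EXTERNAL")
+//	err := fs.Parse([]string{"--delta-sharing-scope", "INTERNAL_AND_EXTERNAL"})
+//	// err is nil here; Set rejects any value outside the allowed list, so a
+//	// typo surfaces as a parse error instead of a bad API request.
+
+type UpdateMonitor struct {
+ // Name of the baseline table from which drift metrics are computed.
+ // Columns in the monitored table should also be present in the baseline
+ // table.
+ BaselineTableName string `json:"baseline_table_name,omitempty"`
+ // Custom metrics to compute on the monitored table. These can be aggregate
+ // metrics, derived metrics (from already computed aggregate metrics), or
+ // drift metrics (comparing metrics across time windows).
+ CustomMetrics []MonitorMetric `json:"custom_metrics,omitempty"`
+ // ID of the dashboard that visualizes the computed metrics. This can be
+ // empty if the monitor is in PENDING state.
+ DashboardId string `json:"dashboard_id,omitempty"`
+ // The data classification config for the monitor.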
+ DataClassificationConfig *MonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + // Configuration for monitoring inference logs. + InferenceLog *MonitorInferenceLog `json:"inference_log,omitempty"` + // The notification settings for the monitor. + Notifications *MonitorNotifications `json:"notifications,omitempty"` + // Schema where output metric tables are created. + OutputSchemaName string `json:"output_schema_name"` + // The schedule for automatically updating and refreshing metric tables. + Schedule *MonitorCronSchedule `json:"schedule,omitempty"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []string `json:"slicing_exprs,omitempty"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `json:"snapshot,omitempty"` + // Full name of the table. + TableName string `json:"-" url:"-"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `json:"time_series,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateMonitor) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateMonitor) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdatePermissions struct { + // Array of permissions change objects. + Changes []PermissionsChange `json:"changes,omitempty"` + // Full name of securable. + FullName string `json:"-" url:"-"` + // Type of securable. + SecurableType SecurableType `json:"-" url:"-"` +} + +type UpdateRegisteredModelRequest struct { + // The comment attached to the registered model + Comment string `json:"comment,omitempty"` + // The three-level (fully qualified) name of the registered model + FullName string `json:"-" url:"-"` + // New name for the registered model. + NewName string `json:"new_name,omitempty"` + // The identifier of the user who owns the registered model + Owner string `json:"owner,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateRegisteredModelRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateRegisteredModelRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateResponse struct { +} + +type UpdateSchema struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `json:"enable_predictive_optimization,omitempty"` + // Full name of the schema. + FullName string `json:"-" url:"-"` + // New name for the schema. + NewName string `json:"new_name,omitempty"` + // Username of current owner of schema. + Owner string `json:"owner,omitempty"` + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateSchema) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateSchema) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateStorageCredential struct { + // The AWS IAM role configuration. 
+ AwsIamRole *AwsIamRoleRequest `json:"aws_iam_role,omitempty"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityResponse `json:"azure_managed_identity,omitempty"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `json:"cloudflare_api_token,omitempty"` + // Comment associated with the credential. + Comment string `json:"comment,omitempty"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `json:"databricks_gcp_service_account,omitempty"` + // Force update even if there are dependent external locations or external + // tables. + Force bool `json:"force,omitempty"` + + IsolationMode IsolationMode `json:"isolation_mode,omitempty"` + // Name of the storage credential. + Name string `json:"-" url:"-"` + // New name for the storage credential. + NewName string `json:"new_name,omitempty"` + // Username of current owner of credential. + Owner string `json:"owner,omitempty"` + // Whether the storage credential is only usable for read operations. + ReadOnly bool `json:"read_only,omitempty"` + // Supplying true to this argument skips validation of the updated + // credential. + SkipValidation bool `json:"skip_validation,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateStorageCredential) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateStorageCredential) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Update a table owner. +type UpdateTableRequest struct { + // Full name of the table. + FullName string `json:"-" url:"-"` + + Owner string `json:"owner,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateTableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateTableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateVolumeRequestContent struct { + // The comment attached to the volume + Comment string `json:"comment,omitempty"` + // The three-level (fully qualified) name of the volume + Name string `json:"-" url:"-"` + // New name for the volume. + NewName string `json:"new_name,omitempty"` + // The identifier of the user who owns the volume + Owner string `json:"owner,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateVolumeRequestContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateVolumeRequestContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateWorkspaceBindings struct { + // A list of workspace IDs. + AssignWorkspaces []int64 `json:"assign_workspaces,omitempty"` + // The name of the catalog. + Name string `json:"-" url:"-"` + // A list of workspace IDs. + UnassignWorkspaces []int64 `json:"unassign_workspaces,omitempty"` +} + +type UpdateWorkspaceBindingsParameters struct { + // List of workspace bindings + Add []WorkspaceBinding `json:"add,omitempty"` + // List of workspace bindings + Remove []WorkspaceBinding `json:"remove,omitempty"` + // The name of the securable. + SecurableName string `json:"-" url:"-"` + // The type of the securable to bind to a workspace. 
+ SecurableType UpdateBindingsSecurableType `json:"-" url:"-"`
+}
+
+type ValidateCredentialRequest struct {
+ // The AWS IAM role configuration
+ AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"`
+ // The Azure managed identity configuration.
+ AzureManagedIdentity *AzureManagedIdentity `json:"azure_managed_identity,omitempty"`
+ // Required. The name of an existing credential or long-lived cloud
+ // credential to validate.
+ CredentialName string `json:"credential_name,omitempty"`
+ // The name of an existing external location to validate. Only applicable
+ // for storage credentials (purpose is **STORAGE**).
+ ExternalLocationName string `json:"external_location_name,omitempty"`
+ // The purpose of the credential. This should only be used when the
+ // credential is specified.
+ Purpose CredentialPurpose `json:"purpose,omitempty"`
+ // Whether the credential is only usable for read operations. Only
+ // applicable for storage credentials (purpose is **STORAGE**).
+ ReadOnly bool `json:"read_only,omitempty"`
+ // The external location URL to validate. Only applicable when purpose is
+ // **STORAGE**.
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ValidateCredentialRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ValidateCredentialRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ValidateCredentialResponse struct {
+ // Whether the tested location is a directory in cloud storage. Only
+ // applicable when purpose is **STORAGE**.
+ IsDir bool `json:"isDir,omitempty"`
+ // The results of the validation check.
+ Results []CredentialValidationResult `json:"results,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ValidateCredentialResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ValidateCredentialResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// An enum that represents the result of the file operation
+type ValidateCredentialResult string
+
+const ValidateCredentialResultFail ValidateCredentialResult = `FAIL`
+
+const ValidateCredentialResultPass ValidateCredentialResult = `PASS`
+
+const ValidateCredentialResultSkip ValidateCredentialResult = `SKIP`
+
+// String representation for [fmt.Print]
+func (f *ValidateCredentialResult) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ValidateCredentialResult) Set(v string) error {
+ switch v {
+ case `FAIL`, `PASS`, `SKIP`:
+ *f = ValidateCredentialResult(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "FAIL", "PASS", "SKIP"`, v)
+ }
+}
+
+// Type always returns ValidateCredentialResult to satisfy [pflag.Value] interface
+func (f *ValidateCredentialResult) Type() string {
+ return "ValidateCredentialResult"
+}
+
+type ValidateStorageCredential struct {
+ // The AWS IAM role configuration.
+ AwsIamRole *AwsIamRoleRequest `json:"aws_iam_role,omitempty"`
+ // The Azure managed identity configuration.
+ AzureManagedIdentity *AzureManagedIdentityRequest `json:"azure_managed_identity,omitempty"`
+ // The Azure service principal configuration.
+ AzureServicePrincipal *AzureServicePrincipal `json:"azure_service_principal,omitempty"`
+ // The Cloudflare API token configuration.
+ CloudflareApiToken *CloudflareApiToken `json:"cloudflare_api_token,omitempty"`
+ // The Databricks-created GCP service account configuration.
+ DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `json:"databricks_gcp_service_account,omitempty"`
+ // The name of an existing external location to validate.
+ ExternalLocationName string `json:"external_location_name,omitempty"`
+ // Whether the storage credential is only usable for read operations.
+ ReadOnly bool `json:"read_only,omitempty"`
+ // The name of the storage credential to validate.
+ StorageCredentialName string `json:"storage_credential_name,omitempty"`
+ // The external location URL to validate.
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ValidateStorageCredential) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ValidateStorageCredential) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ValidateStorageCredentialResponse struct {
+ // Whether the tested location is a directory in cloud storage.
+ IsDir bool `json:"isDir,omitempty"`
+ // The results of the validation check.
+ Results []ValidationResult `json:"results,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ValidateStorageCredentialResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ValidateStorageCredentialResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ValidationResult struct {
+ // Error message present when the result is not **PASS**.
+ Message string `json:"message,omitempty"`
+ // The operation tested.
+ Operation ValidationResultOperation `json:"operation,omitempty"`
+ // The results of the tested operation.
+ Result ValidationResultResult `json:"result,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ValidationResult) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ValidationResult) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The operation tested.
+type ValidationResultOperation string
+
+const ValidationResultOperationDelete ValidationResultOperation = `DELETE`
+
+const ValidationResultOperationList ValidationResultOperation = `LIST`
+
+const ValidationResultOperationPathExists ValidationResultOperation = `PATH_EXISTS`
+
+const ValidationResultOperationRead ValidationResultOperation = `READ`
+
+const ValidationResultOperationWrite ValidationResultOperation = `WRITE`
+
+// String representation for [fmt.Print]
+func (f *ValidationResultOperation) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ValidationResultOperation) Set(v string) error {
+ switch v {
+ case `DELETE`, `LIST`, `PATH_EXISTS`, `READ`, `WRITE`:
+ *f = ValidationResultOperation(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DELETE", "LIST", "PATH_EXISTS", "READ", "WRITE"`, v)
+ }
+}
+
+// Type always returns ValidationResultOperation to satisfy [pflag.Value] interface
+func (f *ValidationResultOperation) Type() string {
+ return "ValidationResultOperation"
+}
+
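+// firstFailure is an editor's sketch, not generated code: a typical caller
+// scans the validation results and surfaces the first FAIL, treating SKIP as
+// "operation not exercised" rather than an error.
+func firstFailure(resp *ValidateStorageCredentialResponse) error {
+ for _, r := range resp.Results {
+ if r.Result == ValidationResultResultFail {
+ return fmt.Errorf("operation %s failed: %s", r.Operation, r.Message)
+ }
+ }
+ return nil
+}
+
+// The results of the tested operation.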
+type ValidationResultResult string
+
+const ValidationResultResultFail ValidationResultResult = `FAIL`
+
+const ValidationResultResultPass ValidationResultResult = `PASS`
+
+const ValidationResultResultSkip ValidationResultResult = `SKIP`
+
+// String representation for [fmt.Print]
+func (f *ValidationResultResult) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ValidationResultResult) Set(v string) error {
+ switch v {
+ case `FAIL`, `PASS`, `SKIP`:
+ *f = ValidationResultResult(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "FAIL", "PASS", "SKIP"`, v)
+ }
+}
+
+// Type always returns ValidationResultResult to satisfy [pflag.Value] interface
+func (f *ValidationResultResult) Type() string {
+ return "ValidationResultResult"
+}
+
+type VolumeInfo struct {
+ // The AWS access point to use when accessing S3 for this external location.
+ AccessPoint string `json:"access_point,omitempty"`
+ // Indicates whether the principal is limited to retrieving metadata for the
+ // associated object through the BROWSE privilege when include_browse is
+ // enabled in the request.
+ BrowseOnly bool `json:"browse_only,omitempty"`
+ // The name of the catalog where the schema and the volume are
+ CatalogName string `json:"catalog_name,omitempty"`
+ // The comment attached to the volume
+ Comment string `json:"comment,omitempty"`
+
+ CreatedAt int64 `json:"created_at,omitempty"`
+ // The identifier of the user who created the volume
+ CreatedBy string `json:"created_by,omitempty"`
+ // Encryption options that apply to clients connecting to cloud storage.
+ EncryptionDetails *EncryptionDetails `json:"encryption_details,omitempty"`
+ // The three-level (fully qualified) name of the volume
+ FullName string `json:"full_name,omitempty"`
+ // The unique identifier of the metastore
+ MetastoreId string `json:"metastore_id,omitempty"`
+ // The name of the volume
+ Name string `json:"name,omitempty"`
+ // The identifier of the user who owns the volume
+ Owner string `json:"owner,omitempty"`
+ // The name of the schema where the volume is
+ SchemaName string `json:"schema_name,omitempty"`
+ // The storage location on the cloud
+ StorageLocation string `json:"storage_location,omitempty"`
+
+ UpdatedAt int64 `json:"updated_at,omitempty"`
+ // The identifier of the user who updated the volume last time
+ UpdatedBy string `json:"updated_by,omitempty"`
+ // The unique identifier of the volume
+ VolumeId string `json:"volume_id,omitempty"`
+
+ VolumeType VolumeType `json:"volume_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *VolumeInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s VolumeInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type VolumeType string
+
+const VolumeTypeExternal VolumeType = `EXTERNAL`
+
+const VolumeTypeManaged VolumeType = `MANAGED`
+
+// String representation for [fmt.Print]
+func (f *VolumeType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *VolumeType) Set(v string) error {
+ switch v {
+ case `EXTERNAL`, `MANAGED`:
+ *f = VolumeType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "MANAGED"`, v)
+ }
+}
+
+// Type always returns VolumeType to satisfy [pflag.Value] interface
+func (f *VolumeType) Type() string {
+ return "VolumeType"
+}
+
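+// describeVolume is an editor's sketch, not generated code, showing a typical
+// branch on the VolumeType enum; the description strings are invented.
+func describeVolume(v VolumeInfo) string {
+ switch v.VolumeType {
+ case VolumeTypeManaged:
+ return v.FullName + ": managed volume"
+ case VolumeTypeExternal:
+ return v.FullName + ": external volume at " + v.StorageLocation
+ default:
+ return v.FullName + ": unrecognized volume type " + string(v.VolumeType)
+ }
+}
+
+type WorkspaceBinding struct {
+ BindingType WorkspaceBindingBindingType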
`json:"binding_type,omitempty"` + + WorkspaceId int64 `json:"workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceBinding) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceBinding) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WorkspaceBindingBindingType string + +const WorkspaceBindingBindingTypeBindingTypeReadOnly WorkspaceBindingBindingType = `BINDING_TYPE_READ_ONLY` + +const WorkspaceBindingBindingTypeBindingTypeReadWrite WorkspaceBindingBindingType = `BINDING_TYPE_READ_WRITE` + +// String representation for [fmt.Print] +func (f *WorkspaceBindingBindingType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspaceBindingBindingType) Set(v string) error { + switch v { + case `BINDING_TYPE_READ_ONLY`, `BINDING_TYPE_READ_WRITE`: + *f = WorkspaceBindingBindingType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BINDING_TYPE_READ_ONLY", "BINDING_TYPE_READ_WRITE"`, v) + } +} + +// Type always returns WorkspaceBindingBindingType to satisfy [pflag.Value] interface +func (f *WorkspaceBindingBindingType) Type() string { + return "WorkspaceBindingBindingType" +} + +// Currently assigned workspace bindings +type WorkspaceBindingsResponse struct { + // List of workspace bindings + Bindings []WorkspaceBinding `json:"bindings,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceBindingsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceBindingsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/cleanrooms/v2preview/api.go b/cleanrooms/v2preview/api.go new file mode 100755 index 000000000..0d568e973 --- /dev/null +++ b/cleanrooms/v2preview/api.go @@ -0,0 +1,257 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Clean Room Assets Preview, Clean Room Task Runs Preview, Clean Rooms Preview, etc. +package cleanroomspreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type CleanRoomAssetsPreviewInterface interface { + + // Create an asset. + // + // Create a clean room asset —share an asset like a notebook or table into the + // clean room. For each UC asset that is added through this method, the clean + // room owner must also have enough privilege on the asset to consume it. The + // privilege must be maintained indefinitely for the clean room to be able to + // access the asset. Typically, you should use a group as the clean room owner. + Create(ctx context.Context, request CreateCleanRoomAssetRequest) (*CleanRoomAsset, error) + + // Delete an asset. + // + // Delete a clean room asset - unshare/remove the asset from the clean room + Delete(ctx context.Context, request DeleteCleanRoomAssetRequest) error + + // Delete an asset. 
+ // + // Delete a clean room asset - unshare/remove the asset from the clean room + DeleteByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) error + + // Get an asset. + // + // Get the details of a clean room asset by its type and full name. + Get(ctx context.Context, request GetCleanRoomAssetRequest) (*CleanRoomAsset, error) + + // Get an asset. + // + // Get the details of a clean room asset by its type and full name. + GetByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) (*CleanRoomAsset, error) + + // List assets. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] + + // List assets. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) + + // List assets. + ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomAssetsResponse, error) + + // Update an asset. + // + // Update a clean room asset. For example, updating the content of a notebook; + // changing the shared partitions of a table; etc. + Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) +} + +func NewCleanRoomAssetsPreview(client *client.DatabricksClient) *CleanRoomAssetsPreviewAPI { + return &CleanRoomAssetsPreviewAPI{ + cleanRoomAssetsPreviewImpl: cleanRoomAssetsPreviewImpl{ + client: client, + }, + } +} + +// Clean room assets are data and code objects (tables, volumes, and +// notebooks) that are shared with the clean room. +type CleanRoomAssetsPreviewAPI struct { + cleanRoomAssetsPreviewImpl +} + +// Delete an asset. +// +// Delete a clean room asset - unshare/remove the asset from the clean room +func (a *CleanRoomAssetsPreviewAPI) DeleteByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) error { + return a.cleanRoomAssetsPreviewImpl.Delete(ctx, DeleteCleanRoomAssetRequest{ + CleanRoomName: cleanRoomName, + AssetType: assetType, + AssetFullName: assetFullName, + }) +} + +// Get an asset. +// +// Get the details of a clean room asset by its type and full name. +func (a *CleanRoomAssetsPreviewAPI) GetByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) (*CleanRoomAsset, error) { + return a.cleanRoomAssetsPreviewImpl.Get(ctx, GetCleanRoomAssetRequest{ + CleanRoomName: cleanRoomName, + AssetType: assetType, + AssetFullName: assetFullName, + }) +} + +// List assets. +func (a *CleanRoomAssetsPreviewAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomAssetsResponse, error) { + return a.cleanRoomAssetsPreviewImpl.internalList(ctx, ListCleanRoomAssetsRequest{ + CleanRoomName: cleanRoomName, + }) +} + +type CleanRoomTaskRunsPreviewInterface interface { + + // List notebook task runs. + // + // List all the historical notebook task runs in a clean room. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] + + // List notebook task runs. + // + // List all the historical notebook task runs in a clean room.
+ // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) + + // List notebook task runs. + // + // List all the historical notebook task runs in a clean room. + ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) +} + +func NewCleanRoomTaskRunsPreview(client *client.DatabricksClient) *CleanRoomTaskRunsPreviewAPI { + return &CleanRoomTaskRunsPreviewAPI{ + cleanRoomTaskRunsPreviewImpl: cleanRoomTaskRunsPreviewImpl{ + client: client, + }, + } +} + +// Clean room task runs are the executions of notebooks in a clean room. +type CleanRoomTaskRunsPreviewAPI struct { + cleanRoomTaskRunsPreviewImpl +} + +// List notebook task runs. +// +// List all the historical notebook task runs in a clean room. +func (a *CleanRoomTaskRunsPreviewAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) { + return a.cleanRoomTaskRunsPreviewImpl.internalList(ctx, ListCleanRoomNotebookTaskRunsRequest{ + CleanRoomName: cleanRoomName, + }) +} + +type CleanRoomsPreviewInterface interface { + + // Create a clean room. + // + // Create a new clean room with the specified collaborators. This method is + // asynchronous; the returned name field inside the clean_room field can be used + // to poll the clean room status, using the :method:cleanrooms/get method. When + // this method returns, the clean room will be in a PROVISIONING state, with + // only name, owner, comment, created_at and status populated. The clean room + // will be usable once it enters an ACTIVE state. + // + // The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** + // privilege on the metastore. + Create(ctx context.Context, request CreateCleanRoomRequest) (*CleanRoom, error) + + // Create an output catalog. + // + // Create the output catalog of the clean room. + CreateOutputCatalog(ctx context.Context, request CreateCleanRoomOutputCatalogRequest) (*CreateCleanRoomOutputCatalogResponse, error) + + // Delete a clean room. + // + // Delete a clean room. After deletion, the clean room will be removed from the + // metastore. If the other collaborators have not deleted the clean room, they + // will still have the clean room in their metastore, but it will be in a + // DELETED state and no operations other than deletion can be performed on it. + Delete(ctx context.Context, request DeleteCleanRoomRequest) error + + // Delete a clean room. + // + // Delete a clean room. After deletion, the clean room will be removed from the + // metastore. If the other collaborators have not deleted the clean room, they + // will still have the clean room in their metastore, but it will be in a + // DELETED state and no operations other than deletion can be performed on it. + DeleteByName(ctx context.Context, name string) error + + // Get a clean room. + // + // Get the details of a clean room given its name. + Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoom, error) + + // Get a clean room. + // + // Get the details of a clean room given its name. + GetByName(ctx context.Context, name string) (*CleanRoom, error) + + // List clean rooms. + // + // Get a list of all clean rooms of the metastore. Only clean rooms the caller + // has access to are returned. + // + // This method is generated by Databricks SDK Code Generator. 
+ List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] + + // List clean rooms. + // + // Get a list of all clean rooms of the metastore. Only clean rooms the caller + // has access to are returned. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) + + // Update a clean room. + // + // Update a clean room. The caller must be the owner of the clean room, have + // **MODIFY_CLEAN_ROOM** privilege, or be a metastore admin. + // + // When the caller is a metastore admin, only the __owner__ field can be + // updated. + Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) +} + +func NewCleanRoomsPreview(client *client.DatabricksClient) *CleanRoomsPreviewAPI { + return &CleanRoomsPreviewAPI{ + cleanRoomsPreviewImpl: cleanRoomsPreviewImpl{ + client: client, + }, + } +} + +// A clean room uses Delta Sharing and serverless compute to provide a secure +// and privacy-protecting environment where multiple parties can work together +// on sensitive enterprise data without direct access to each other’s data. +type CleanRoomsPreviewAPI struct { + cleanRoomsPreviewImpl +} + +// Delete a clean room. +// +// Delete a clean room. After deletion, the clean room will be removed from the +// metastore. If the other collaborators have not deleted the clean room, they +// will still have the clean room in their metastore, but it will be in a +// DELETED state and no operations other than deletion can be performed on it. +func (a *CleanRoomsPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.cleanRoomsPreviewImpl.Delete(ctx, DeleteCleanRoomRequest{ + Name: name, + }) +} + +// Get a clean room. +// +// Get the details of a clean room given its name. +func (a *CleanRoomsPreviewAPI) GetByName(ctx context.Context, name string) (*CleanRoom, error) { + return a.cleanRoomsPreviewImpl.Get(ctx, GetCleanRoomRequest{ + Name: name, + }) +} diff --git a/cleanrooms/v2preview/client.go b/cleanrooms/v2preview/client.go new file mode 100755 index 000000000..e003bd7c9 --- /dev/null +++ b/cleanrooms/v2preview/client.go @@ -0,0 +1,113 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
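A minimal usage sketch for the API above, assuming the workspace-level client defined in client.go below, credentials resolved from the environment (a nil config), a hypothetical clean room name, and an import path inferred from the repository layout:

	package main

	import (
		"context"
		"fmt"
		"log"

		cleanroomspreview "github.com/databricks/databricks-sdk-go/cleanrooms/v2preview"
	)

	func main() {
		ctx := context.Background()
		// A nil config falls back to an empty config.Config, which EnsureResolved
		// fills in from the environment (e.g. DATABRICKS_HOST, DATABRICKS_TOKEN).
		c, err := cleanroomspreview.NewCleanRoomsPreviewClient(nil)
		if err != nil {
			log.Fatal(err)
		}
		// GetByName is the by-name convenience wrapper over Get.
		room, err := c.GetByName(ctx, "demo-clean-room") // hypothetical name
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(room.Name, room.Status)
	}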
+ +package cleanroomspreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type CleanRoomAssetsPreviewClient struct { + CleanRoomAssetsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCleanRoomAssetsPreviewClient(cfg *config.Config) (*CleanRoomAssetsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CleanRoomAssetsPreviewClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomAssetsPreviewInterface: NewCleanRoomAssetsPreview(databricksClient), + }, nil +} + +type CleanRoomTaskRunsPreviewClient struct { + CleanRoomTaskRunsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCleanRoomTaskRunsPreviewClient(cfg *config.Config) (*CleanRoomTaskRunsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CleanRoomTaskRunsPreviewClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomTaskRunsPreviewInterface: NewCleanRoomTaskRunsPreview(databricksClient), + }, nil +} + +type CleanRoomsPreviewClient struct { + CleanRoomsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCleanRoomsPreviewClient(cfg *config.Config) (*CleanRoomsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CleanRoomsPreviewClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomsPreviewInterface: NewCleanRoomsPreview(databricksClient), + }, nil +} diff --git a/cleanrooms/v2preview/impl.go b/cleanrooms/v2preview/impl.go new file mode 100755 index 000000000..7c4903a52 --- /dev/null +++ b/cleanrooms/v2preview/impl.go @@ -0,0 +1,253 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
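The impl.go below wires each paginated List through listing.NewIterator, passing NextPageToken back as PageToken until the server stops returning one. A sketch of consuming it, continuing from the previous example's c and ctx and assuming the iterator exposes HasNext/Next as in the main SDK's listing package:

	// Stream clean rooms lazily; the iterator fetches the next page on demand.
	it := c.List(ctx, cleanroomspreview.ListCleanRoomsRequest{})
	for it.HasNext(ctx) {
		room, err := it.Next(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(room.Name)
	}

	// Or materialize every page at once.
	rooms, err := c.ListAll(ctx, cleanroomspreview.ListCleanRoomsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(rooms), "clean rooms")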
+ +package cleanroomspreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just CleanRoomAssetsPreview API methods +type cleanRoomAssetsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *cleanRoomAssetsPreviewImpl) Create(ctx context.Context, request CreateCleanRoomAssetRequest) (*CleanRoomAsset, error) { + var cleanRoomAsset CleanRoomAsset + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets", request.CleanRoomName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Asset, &cleanRoomAsset) + return &cleanRoomAsset, err +} + +func (a *cleanRoomAssetsPreviewImpl) Delete(ctx context.Context, request DeleteCleanRoomAssetRequest) error { + var deleteCleanRoomAssetResponse DeleteCleanRoomAssetResponse + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCleanRoomAssetResponse) + return err +} + +func (a *cleanRoomAssetsPreviewImpl) Get(ctx context.Context, request GetCleanRoomAssetRequest) (*CleanRoomAsset, error) { + var cleanRoomAsset CleanRoomAsset + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cleanRoomAsset) + return &cleanRoomAsset, err +} + +// List assets. +func (a *cleanRoomAssetsPreviewImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] { + + getNextPage := func(ctx context.Context, req ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomAssetsResponse) []CleanRoomAsset { + return resp.Assets + } + getNextReq := func(resp *ListCleanRoomAssetsResponse) *ListCleanRoomAssetsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List assets. 
+func (a *cleanRoomAssetsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoomAsset](ctx, iterator) +} +func (a *cleanRoomAssetsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { + var listCleanRoomAssetsResponse ListCleanRoomAssetsResponse + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets", request.CleanRoomName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCleanRoomAssetsResponse) + return &listCleanRoomAssetsResponse, err +} + +func (a *cleanRoomAssetsPreviewImpl) Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) { + var cleanRoomAsset CleanRoomAsset + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Asset, &cleanRoomAsset) + return &cleanRoomAsset, err +} + +// unexported type that holds implementations of just CleanRoomTaskRunsPreview API methods +type cleanRoomTaskRunsPreviewImpl struct { + client *client.DatabricksClient +} + +// List notebook task runs. +// +// List all the historical notebook task runs in a clean room. +func (a *cleanRoomTaskRunsPreviewImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] { + + getNextPage := func(ctx context.Context, req ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomNotebookTaskRunsResponse) []CleanRoomNotebookTaskRun { + return resp.Runs + } + getNextReq := func(resp *ListCleanRoomNotebookTaskRunsResponse) *ListCleanRoomNotebookTaskRunsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List notebook task runs. +// +// List all the historical notebook task runs in a clean room. 
+func (a *cleanRoomTaskRunsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoomNotebookTaskRun](ctx, iterator) +} +func (a *cleanRoomTaskRunsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { + var listCleanRoomNotebookTaskRunsResponse ListCleanRoomNotebookTaskRunsResponse + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/runs", request.CleanRoomName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCleanRoomNotebookTaskRunsResponse) + return &listCleanRoomNotebookTaskRunsResponse, err +} + +// unexported type that holds implementations of just CleanRoomsPreview API methods +type cleanRoomsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *cleanRoomsPreviewImpl) Create(ctx context.Context, request CreateCleanRoomRequest) (*CleanRoom, error) { + var cleanRoom CleanRoom + path := "/api/2.0preview/clean-rooms" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.CleanRoom, &cleanRoom) + return &cleanRoom, err +} + +func (a *cleanRoomsPreviewImpl) CreateOutputCatalog(ctx context.Context, request CreateCleanRoomOutputCatalogRequest) (*CreateCleanRoomOutputCatalogResponse, error) { + var createCleanRoomOutputCatalogResponse CreateCleanRoomOutputCatalogResponse + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/output-catalogs", request.CleanRoomName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.OutputCatalog, &createCleanRoomOutputCatalogResponse) + return &createCleanRoomOutputCatalogResponse, err +} + +func (a *cleanRoomsPreviewImpl) Delete(ctx context.Context, request DeleteCleanRoomRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *cleanRoomsPreviewImpl) Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoom, error) { + var cleanRoom CleanRoom + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cleanRoom) + return &cleanRoom, err +} + +// List clean rooms. +// +// Get a list of all clean rooms of the metastore. Only clean rooms the caller +// has access to are returned. 
+func (a *cleanRoomsPreviewImpl) List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] { + + getNextPage := func(ctx context.Context, req ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomsResponse) []CleanRoom { + return resp.CleanRooms + } + getNextReq := func(resp *ListCleanRoomsResponse) *ListCleanRoomsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List clean rooms. +// +// Get a list of all clean rooms of the metastore. Only clean rooms the caller +// has access to are returned. +func (a *cleanRoomsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoom](ctx, iterator) +} +func (a *cleanRoomsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { + var listCleanRoomsResponse ListCleanRoomsResponse + path := "/api/2.0preview/clean-rooms" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCleanRoomsResponse) + return &listCleanRoomsResponse, err +} + +func (a *cleanRoomsPreviewImpl) Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) { + var cleanRoom CleanRoom + path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &cleanRoom) + return &cleanRoom, err +} diff --git a/cleanrooms/v2preview/model.go b/cleanrooms/v2preview/model.go new file mode 100755 index 000000000..779242e92 --- /dev/null +++ b/cleanrooms/v2preview/model.go @@ -0,0 +1,1329 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package cleanroomspreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type CleanRoom struct { + // Whether clean room access is restricted due to [CSP] + // + // [CSP]: https://docs.databricks.com/en/security/privacy/security-profile.html + AccessRestricted CleanRoomAccessRestricted `json:"access_restricted,omitempty"` + + Comment string `json:"comment,omitempty"` + // When the clean room was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // The alias of the collaborator tied to the local clean room. + LocalCollaboratorAlias string `json:"local_collaborator_alias,omitempty"` + // The name of the clean room. It should follow [UC securable naming + // requirements]. + // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + Name string `json:"name,omitempty"` + // Output catalog of the clean room. It is an output only field. Output + // catalog is manipulated using the separate CreateCleanRoomOutputCatalog + // API. 
+ OutputCatalog *CleanRoomOutputCatalog `json:"output_catalog,omitempty"` + // This is the Databricks username of the owner of the local clean room + // securable for permission management. + Owner string `json:"owner,omitempty"` + // Central clean room details. During creation, users need to specify + // cloud_vendor, region, and collaborators.global_metastore_id. This field + // will not be filled in the ListCleanRooms call. + RemoteDetailedInfo *CleanRoomRemoteDetail `json:"remote_detailed_info,omitempty"` + // Clean room status. + Status CleanRoomStatusEnum `json:"status,omitempty"` + // When the clean room was last updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoom) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoom) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAccessRestricted string + +const CleanRoomAccessRestrictedCspMismatch CleanRoomAccessRestricted = `CSP_MISMATCH` + +const CleanRoomAccessRestrictedNoRestriction CleanRoomAccessRestricted = `NO_RESTRICTION` + +// String representation for [fmt.Print] +func (f *CleanRoomAccessRestricted) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomAccessRestricted) Set(v string) error { + switch v { + case `CSP_MISMATCH`, `NO_RESTRICTION`: + *f = CleanRoomAccessRestricted(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CSP_MISMATCH", "NO_RESTRICTION"`, v) + } +} + +// Type always returns CleanRoomAccessRestricted to satisfy [pflag.Value] interface +func (f *CleanRoomAccessRestricted) Type() string { + return "CleanRoomAccessRestricted" +} + +// Metadata of the clean room asset +type CleanRoomAsset struct { + // When the asset is added to the clean room, in epoch milliseconds. + AddedAt int64 `json:"added_at,omitempty"` + // The type of the asset. + AssetType CleanRoomAssetAssetType `json:"asset_type,omitempty"` + // Foreign table details available to all collaborators of the clean room. + // Present if and only if **asset_type** is **FOREIGN_TABLE** + ForeignTable *CleanRoomAssetForeignTable `json:"foreign_table,omitempty"` + // Local details for a foreign table that are only available to its owner. Present + // if and only if **asset_type** is **FOREIGN_TABLE** + ForeignTableLocalDetails *CleanRoomAssetForeignTableLocalDetails `json:"foreign_table_local_details,omitempty"` + // A fully qualified name that uniquely identifies the asset within the + // clean room. This is also the name displayed in the clean room UI. + // + // For UC securable assets (tables, volumes, etc.), the format is + // *shared_catalog*.*shared_schema*.*asset_name* + // + // For notebooks, the name is the notebook file name. + Name string `json:"name,omitempty"` + // Notebook details available to all collaborators of the clean room. + // Present if and only if **asset_type** is **NOTEBOOK_FILE** + Notebook *CleanRoomAssetNotebook `json:"notebook,omitempty"` + // The alias of the collaborator who owns this asset + OwnerCollaboratorAlias string `json:"owner_collaborator_alias,omitempty"` + // Status of the asset + Status CleanRoomAssetStatusEnum `json:"status,omitempty"` + // Table details available to all collaborators of the clean room. Present + // if and only if **asset_type** is **TABLE** + Table *CleanRoomAssetTable `json:"table,omitempty"` + // Local details for a table that are only available to its owner.
Present + // if and only if **asset_type** is **TABLE** + TableLocalDetails *CleanRoomAssetTableLocalDetails `json:"table_local_details,omitempty"` + // View details available to all collaborators of the clean room. Present if + // and only if **asset_type** is **VIEW** + View *CleanRoomAssetView `json:"view,omitempty"` + // Local details for a view that are only available to its owner. Present if + // and only if **asset_type** is **VIEW** + ViewLocalDetails *CleanRoomAssetViewLocalDetails `json:"view_local_details,omitempty"` + // Local details for a volume that are only available to its owner. Present + // if and only if **asset_type** is **VOLUME** + VolumeLocalDetails *CleanRoomAssetVolumeLocalDetails `json:"volume_local_details,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAsset) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAsset) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAssetAssetType string + +const CleanRoomAssetAssetTypeForeignTable CleanRoomAssetAssetType = `FOREIGN_TABLE` + +const CleanRoomAssetAssetTypeNotebookFile CleanRoomAssetAssetType = `NOTEBOOK_FILE` + +const CleanRoomAssetAssetTypeTable CleanRoomAssetAssetType = `TABLE` + +const CleanRoomAssetAssetTypeView CleanRoomAssetAssetType = `VIEW` + +const CleanRoomAssetAssetTypeVolume CleanRoomAssetAssetType = `VOLUME` + +// String representation for [fmt.Print] +func (f *CleanRoomAssetAssetType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomAssetAssetType) Set(v string) error { + switch v { + case `FOREIGN_TABLE`, `NOTEBOOK_FILE`, `TABLE`, `VIEW`, `VOLUME`: + *f = CleanRoomAssetAssetType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FOREIGN_TABLE", "NOTEBOOK_FILE", "TABLE", "VIEW", "VOLUME"`, v) + } +} + +// Type always returns CleanRoomAssetAssetType to satisfy [pflag.Value] interface +func (f *CleanRoomAssetAssetType) Type() string { + return "CleanRoomAssetAssetType" +} + +type CleanRoomAssetForeignTable struct { + // The metadata information of the columns in the foreign table + Columns []ColumnInfo `json:"columns,omitempty"` +} + +type CleanRoomAssetForeignTableLocalDetails struct { + // The fully qualified name of the foreign table in its owner's local + // metastore, in the format of *catalog*.*schema*.*foreign_table_name* + LocalName string `json:"local_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAssetForeignTableLocalDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAssetForeignTableLocalDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAssetNotebook struct { + // Server generated checksum that represents the notebook version. + Etag string `json:"etag,omitempty"` + // Base 64 representation of the notebook contents. This is the same format + // as returned by :method:workspace/export with the format of **HTML**. 
+ NotebookContent string `json:"notebook_content,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAssetNotebook) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAssetNotebook) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAssetStatusEnum string + +const CleanRoomAssetStatusEnumActive CleanRoomAssetStatusEnum = `ACTIVE` + +const CleanRoomAssetStatusEnumPending CleanRoomAssetStatusEnum = `PENDING` + +const CleanRoomAssetStatusEnumPermissionDenied CleanRoomAssetStatusEnum = `PERMISSION_DENIED` + +// String representation for [fmt.Print] +func (f *CleanRoomAssetStatusEnum) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomAssetStatusEnum) Set(v string) error { + switch v { + case `ACTIVE`, `PENDING`, `PERMISSION_DENIED`: + *f = CleanRoomAssetStatusEnum(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "PENDING", "PERMISSION_DENIED"`, v) + } +} + +// Type always returns CleanRoomAssetStatusEnum to satisfy [pflag.Value] interface +func (f *CleanRoomAssetStatusEnum) Type() string { + return "CleanRoomAssetStatusEnum" +} + +type CleanRoomAssetTable struct { + // The metadata information of the columns in the table + Columns []ColumnInfo `json:"columns,omitempty"` +} + +type CleanRoomAssetTableLocalDetails struct { + // The fully qualified name of the table in its owner's local metastore, in + // the format of *catalog*.*schema*.*table_name* + LocalName string `json:"local_name,omitempty"` + // Partition filtering specification for a shared table. + Partitions []PartitionSpecificationPartition `json:"partitions,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAssetTableLocalDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAssetTableLocalDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAssetView struct { + // The metadata information of the columns in the view + Columns []ColumnInfo `json:"columns,omitempty"` +} + +type CleanRoomAssetViewLocalDetails struct { + // The fully qualified name of the view in its owner's local metastore, in + // the format of *catalog*.*schema*.*view_name* + LocalName string `json:"local_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAssetViewLocalDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAssetViewLocalDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomAssetVolumeLocalDetails struct { + // The fully qualified name of the volume in its owner's local metastore, in + // the format of *catalog*.*schema*.*volume_name* + LocalName string `json:"local_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomAssetVolumeLocalDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomAssetVolumeLocalDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Publicly visible clean room collaborator. +type CleanRoomCollaborator struct { + // Collaborator alias specified by the clean room creator. It is unique + // across all collaborators of this clean room, and used to derive multiple + // values internally such as catalog alias and clean room name for single + // metastore clean rooms. It should follow [UC securable naming + // requirements]. 
+ // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + CollaboratorAlias string `json:"collaborator_alias"` + // Generated display name for the collaborator. In the case of a single + // metastore clean room, it is the clean room name. For x-metastore clean + // rooms, it is the organization name of the metastore. It is not restricted + // to these values and could change in the future. + DisplayName string `json:"display_name,omitempty"` + // The global Unity Catalog metastore id of the collaborator. The identifier + // is of the format cloud:region:metastore-uuid. + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + // Email of the user who is receiving the clean room "invitation". It should + // be empty for the creator of the clean room, and non-empty for the + // invitees of the clean room. It is only returned in the output when the clean + // room creator calls GET + InviteRecipientEmail string `json:"invite_recipient_email,omitempty"` + // Workspace ID of the user who is receiving the clean room "invitation". + // Must be specified if invite_recipient_email is specified. It should be + // empty when the collaborator is the creator of the clean room. + InviteRecipientWorkspaceId int64 `json:"invite_recipient_workspace_id,omitempty"` + // [Organization + // name](:method:metastores/list#metastores-delta_sharing_organization_name) + // configured in the metastore + OrganizationName string `json:"organization_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomCollaborator) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomCollaborator) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Stores information about a single task run. +type CleanRoomNotebookTaskRun struct { + // Job run info of the task in the runner's local workspace. This field is + // only included in the LIST API if the task was run within the same + // workspace that the API is being called from. If the task run was in a different + // workspace under the same metastore, only the workspace_id is included. + CollaboratorJobRunInfo *CollaboratorJobRunInfo `json:"collaborator_job_run_info,omitempty"` + // State of the task run. + NotebookJobRunState *CleanRoomTaskRunState `json:"notebook_job_run_state,omitempty"` + // Asset name of the notebook executed in this task run. + NotebookName string `json:"notebook_name,omitempty"` + // Expiration time of the output schema of the task run (if any), in epoch + // milliseconds. + OutputSchemaExpirationTime int64 `json:"output_schema_expiration_time,omitempty"` + // Name of the output schema associated with the clean rooms notebook task + // run. + OutputSchemaName string `json:"output_schema_name,omitempty"` + // Duration of the task run, in milliseconds. + RunDuration int64 `json:"run_duration,omitempty"` + // When the task run started, in epoch milliseconds. + StartTime int64 `json:"start_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomNotebookTaskRun) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomNotebookTaskRun) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomOutputCatalog struct { + // The name of the output catalog in UC. It should follow [UC securable + // naming requirements]. The field will always exist if status is CREATED.
+ // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + CatalogName string `json:"catalog_name,omitempty"` + + Status CleanRoomOutputCatalogOutputCatalogStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomOutputCatalog) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomOutputCatalog) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomOutputCatalogOutputCatalogStatus string + +const CleanRoomOutputCatalogOutputCatalogStatusCreated CleanRoomOutputCatalogOutputCatalogStatus = `CREATED` + +const CleanRoomOutputCatalogOutputCatalogStatusNotCreated CleanRoomOutputCatalogOutputCatalogStatus = `NOT_CREATED` + +const CleanRoomOutputCatalogOutputCatalogStatusNotEligible CleanRoomOutputCatalogOutputCatalogStatus = `NOT_ELIGIBLE` + +// String representation for [fmt.Print] +func (f *CleanRoomOutputCatalogOutputCatalogStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomOutputCatalogOutputCatalogStatus) Set(v string) error { + switch v { + case `CREATED`, `NOT_CREATED`, `NOT_ELIGIBLE`: + *f = CleanRoomOutputCatalogOutputCatalogStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CREATED", "NOT_CREATED", "NOT_ELIGIBLE"`, v) + } +} + +// Type always returns CleanRoomOutputCatalogOutputCatalogStatus to satisfy [pflag.Value] interface +func (f *CleanRoomOutputCatalogOutputCatalogStatus) Type() string { + return "CleanRoomOutputCatalogOutputCatalogStatus" +} + +// Publicly visible central clean room details. +type CleanRoomRemoteDetail struct { + // Central clean room ID. + CentralCleanRoomId string `json:"central_clean_room_id,omitempty"` + // Cloud vendor (aws, azure, gcp) of the central clean room. + CloudVendor string `json:"cloud_vendor,omitempty"` + // Collaborators in the central clean room. There should be one and only one + // collaborator in the list that satisfies the owner condition: + // + // 1. It has the creator's global_metastore_id (determined by caller of + // CreateCleanRoom). + // + // 2. Its invite_recipient_email is empty. + Collaborators []CleanRoomCollaborator `json:"collaborators,omitempty"` + // The compliance security profile used to process regulated data following + // compliance standards. + ComplianceSecurityProfile *ComplianceSecurityProfile `json:"compliance_security_profile,omitempty"` + // Collaborator who creates the clean room. + Creator *CleanRoomCollaborator `json:"creator,omitempty"` + // Egress network policy to apply to the central clean room workspace. + EgressNetworkPolicy *EgressNetworkPolicy `json:"egress_network_policy,omitempty"` + // Region of the central clean room.
+ Region string `json:"region,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomRemoteDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomRemoteDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomStatusEnum string + +const CleanRoomStatusEnumActive CleanRoomStatusEnum = `ACTIVE` + +const CleanRoomStatusEnumDeleted CleanRoomStatusEnum = `DELETED` + +const CleanRoomStatusEnumFailed CleanRoomStatusEnum = `FAILED` + +const CleanRoomStatusEnumProvisioning CleanRoomStatusEnum = `PROVISIONING` + +// String representation for [fmt.Print] +func (f *CleanRoomStatusEnum) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomStatusEnum) Set(v string) error { + switch v { + case `ACTIVE`, `DELETED`, `FAILED`, `PROVISIONING`: + *f = CleanRoomStatusEnum(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETED", "FAILED", "PROVISIONING"`, v) + } +} + +// Type always returns CleanRoomStatusEnum to satisfy [pflag.Value] interface +func (f *CleanRoomStatusEnum) Type() string { + return "CleanRoomStatusEnum" +} + +// Copied from elastic-spark-common/api/messages/runs.proto. Using the original +// definition to remove coupling with jobs API definition +type CleanRoomTaskRunLifeCycleState string + +const CleanRoomTaskRunLifeCycleStateBlocked CleanRoomTaskRunLifeCycleState = `BLOCKED` + +const CleanRoomTaskRunLifeCycleStateInternalError CleanRoomTaskRunLifeCycleState = `INTERNAL_ERROR` + +const CleanRoomTaskRunLifeCycleStatePending CleanRoomTaskRunLifeCycleState = `PENDING` + +const CleanRoomTaskRunLifeCycleStateQueued CleanRoomTaskRunLifeCycleState = `QUEUED` + +const CleanRoomTaskRunLifeCycleStateRunning CleanRoomTaskRunLifeCycleState = `RUNNING` + +const CleanRoomTaskRunLifeCycleStateRunLifeCycleStateUnspecified CleanRoomTaskRunLifeCycleState = `RUN_LIFE_CYCLE_STATE_UNSPECIFIED` + +const CleanRoomTaskRunLifeCycleStateSkipped CleanRoomTaskRunLifeCycleState = `SKIPPED` + +const CleanRoomTaskRunLifeCycleStateTerminated CleanRoomTaskRunLifeCycleState = `TERMINATED` + +const CleanRoomTaskRunLifeCycleStateTerminating CleanRoomTaskRunLifeCycleState = `TERMINATING` + +const CleanRoomTaskRunLifeCycleStateWaitingForRetry CleanRoomTaskRunLifeCycleState = `WAITING_FOR_RETRY` + +// String representation for [fmt.Print] +func (f *CleanRoomTaskRunLifeCycleState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomTaskRunLifeCycleState) Set(v string) error { + switch v { + case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `RUN_LIFE_CYCLE_STATE_UNSPECIFIED`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: + *f = CleanRoomTaskRunLifeCycleState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "RUN_LIFE_CYCLE_STATE_UNSPECIFIED", "SKIPPED", "TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) + } +} + +// Type always returns CleanRoomTaskRunLifeCycleState to satisfy [pflag.Value] interface +func (f *CleanRoomTaskRunLifeCycleState) Type() string { + return "CleanRoomTaskRunLifeCycleState" +} + +// Copied from elastic-spark-common/api/messages/runs.proto. Using the original +// definition to avoid cyclic dependency. 
+type CleanRoomTaskRunResultState string + +const CleanRoomTaskRunResultStateCanceled CleanRoomTaskRunResultState = `CANCELED` + +const CleanRoomTaskRunResultStateDisabled CleanRoomTaskRunResultState = `DISABLED` + +const CleanRoomTaskRunResultStateEvicted CleanRoomTaskRunResultState = `EVICTED` + +const CleanRoomTaskRunResultStateExcluded CleanRoomTaskRunResultState = `EXCLUDED` + +const CleanRoomTaskRunResultStateFailed CleanRoomTaskRunResultState = `FAILED` + +const CleanRoomTaskRunResultStateMaximumConcurrentRunsReached CleanRoomTaskRunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED` + +const CleanRoomTaskRunResultStateRunResultStateUnspecified CleanRoomTaskRunResultState = `RUN_RESULT_STATE_UNSPECIFIED` + +const CleanRoomTaskRunResultStateSuccess CleanRoomTaskRunResultState = `SUCCESS` + +const CleanRoomTaskRunResultStateSuccessWithFailures CleanRoomTaskRunResultState = `SUCCESS_WITH_FAILURES` + +const CleanRoomTaskRunResultStateTimedout CleanRoomTaskRunResultState = `TIMEDOUT` + +const CleanRoomTaskRunResultStateUpstreamCanceled CleanRoomTaskRunResultState = `UPSTREAM_CANCELED` + +const CleanRoomTaskRunResultStateUpstreamEvicted CleanRoomTaskRunResultState = `UPSTREAM_EVICTED` + +const CleanRoomTaskRunResultStateUpstreamFailed CleanRoomTaskRunResultState = `UPSTREAM_FAILED` + +// String representation for [fmt.Print] +func (f *CleanRoomTaskRunResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomTaskRunResultState) Set(v string) error { + switch v { + case `CANCELED`, `DISABLED`, `EVICTED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `RUN_RESULT_STATE_UNSPECIFIED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_EVICTED`, `UPSTREAM_FAILED`: + *f = CleanRoomTaskRunResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EVICTED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "RUN_RESULT_STATE_UNSPECIFIED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_EVICTED", "UPSTREAM_FAILED"`, v) + } +} + +// Type always returns CleanRoomTaskRunResultState to satisfy [pflag.Value] interface +func (f *CleanRoomTaskRunResultState) Type() string { + return "CleanRoomTaskRunResultState" +} + +// Stores the run state of the clean rooms notebook task. +type CleanRoomTaskRunState struct { + // A value indicating the run's current lifecycle state. This field is + // always available in the response. + LifeCycleState CleanRoomTaskRunLifeCycleState `json:"life_cycle_state,omitempty"` + // A value indicating the run's result. This field is only available for + // terminal lifecycle states. + ResultState CleanRoomTaskRunResultState `json:"result_state,omitempty"` +} + +type CollaboratorJobRunInfo struct { + // Alias of the collaborator that triggered the task run. + CollaboratorAlias string `json:"collaborator_alias,omitempty"` + // Job ID of the task run in the collaborator's workspace. + CollaboratorJobId int64 `json:"collaborator_job_id,omitempty"` + // Job run ID of the task run in the collaborator's workspace. + CollaboratorJobRunId int64 `json:"collaborator_job_run_id,omitempty"` + // Task run ID of the task run in the collaborator's workspace. + CollaboratorTaskRunId int64 `json:"collaborator_task_run_id,omitempty"` + // ID of the collaborator's workspace that triggered the task run. 
+ CollaboratorWorkspaceId int64 `json:"collaborator_workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CollaboratorJobRunInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CollaboratorJobRunInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + + Mask *ColumnMask `json:"mask,omitempty"` + // Name of Column. + Name string `json:"name,omitempty"` + // Whether field may be Null (default: true). + Nullable bool `json:"nullable,omitempty"` + // Partition index for column. + PartitionIndex int `json:"partition_index,omitempty"` + // Ordinal position of column (starting at position 0). + Position int `json:"position,omitempty"` + // Format of IntervalType. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // Full data type specification, JSON-serialized. + TypeJson string `json:"type_json,omitempty"` + + TypeName ColumnTypeName `json:"type_name,omitempty"` + // Digits of precision; required for DecimalTypes. + TypePrecision int `json:"type_precision,omitempty"` + // Digits to right of decimal; Required for DecimalTypes. + TypeScale int `json:"type_scale,omitempty"` + // Full data type specification as SQL/catalogString text. + TypeText string `json:"type_text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnMask struct { + // The full name of the column mask SQL UDF. + FunctionName string `json:"function_name,omitempty"` + // The list of additional table columns to be passed as input to the column + // mask function. The first arg of the mask function should be of the type + // of the column being masked and the types of the rest of the args should + // match the types of columns in 'using_column_names'. 
+ UsingColumnNames []string `json:"using_column_names,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnMask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnMask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +const ColumnTypeNameVariant ColumnTypeName = `VARIANT` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) + } +} + +// Type always returns ColumnTypeName to satisfy [pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + +// The compliance security profile used to process regulated data following +// compliance standards. +type ComplianceSecurityProfile struct { + // The list of compliance standards that the compliance security profile is + // configured to enforce. + ComplianceStandards []ComplianceStandard `json:"compliance_standards,omitempty"` + // Whether the compliance security profile is enabled. 
+ IsEnabled bool `json:"is_enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ComplianceSecurityProfile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ComplianceSecurityProfile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Compliance standard for SHIELD customers +type ComplianceStandard string + +const ComplianceStandardCanadaProtectedB ComplianceStandard = `CANADA_PROTECTED_B` + +const ComplianceStandardCyberEssentialPlus ComplianceStandard = `CYBER_ESSENTIAL_PLUS` + +const ComplianceStandardFedrampHigh ComplianceStandard = `FEDRAMP_HIGH` + +const ComplianceStandardFedrampIl5 ComplianceStandard = `FEDRAMP_IL5` + +const ComplianceStandardFedrampModerate ComplianceStandard = `FEDRAMP_MODERATE` + +const ComplianceStandardHipaa ComplianceStandard = `HIPAA` + +const ComplianceStandardHitrust ComplianceStandard = `HITRUST` + +const ComplianceStandardIrapProtected ComplianceStandard = `IRAP_PROTECTED` + +const ComplianceStandardIsmap ComplianceStandard = `ISMAP` + +const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR` + +const ComplianceStandardNone ComplianceStandard = `NONE` + +const ComplianceStandardPciDss ComplianceStandard = `PCI_DSS` + +// String representation for [fmt.Print] +func (f *ComplianceStandard) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ComplianceStandard) Set(v string) error { + switch v { + case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + *f = ComplianceStandard(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + } +} + +// Type always returns ComplianceStandard to satisfy [pflag.Value] interface +func (f *ComplianceStandard) Type() string { + return "ComplianceStandard" +} + +// Create an asset +type CreateCleanRoomAssetRequest struct { + // Metadata of the clean room asset + Asset *CleanRoomAsset `json:"asset,omitempty"` + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` +} + +// Create an output catalog +type CreateCleanRoomOutputCatalogRequest struct { + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` + + OutputCatalog *CleanRoomOutputCatalog `json:"output_catalog,omitempty"` +} + +type CreateCleanRoomOutputCatalogResponse struct { + OutputCatalog *CleanRoomOutputCatalog `json:"output_catalog,omitempty"` +} + +// Create a clean room +type CreateCleanRoomRequest struct { + CleanRoom *CleanRoom `json:"clean_room,omitempty"` +} + +// Delete an asset +type DeleteCleanRoomAssetRequest struct { + // The fully qualified name of the asset; it is the same as the name field in + // CleanRoomAsset. + AssetFullName string `json:"-" url:"-"` + // The type of the asset. + AssetType CleanRoomAssetAssetType `json:"-" url:"-"` + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` +} + +// Response for delete clean room request. Using an empty message since the +// generic Empty proto does not extend UnshadedMessageMarker. +type DeleteCleanRoomAssetResponse struct { +} + +// Delete a clean room +type DeleteCleanRoomRequest struct { + // Name of the clean room.
+ Name string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// The network policies that apply to egress traffic. This message is used by the +// UI/REST API. We translate this message to the format expected by the +// dataplane in Lakehouse Network Manager (for the format expected by the +// dataplane, see networkconfig.textproto). +type EgressNetworkPolicy struct { + // The access policy enforced for egress traffic to the internet. + InternetAccess *EgressNetworkPolicyInternetAccessPolicy `json:"internet_access,omitempty"` +} + +type EgressNetworkPolicyInternetAccessPolicy struct { + AllowedInternetDestinations []EgressNetworkPolicyInternetAccessPolicyInternetDestination `json:"allowed_internet_destinations,omitempty"` + + AllowedStorageDestinations []EgressNetworkPolicyInternetAccessPolicyStorageDestination `json:"allowed_storage_destinations,omitempty"` + // Optional. If not specified, assume the policy is enforced for all + // workloads. + LogOnlyMode *EgressNetworkPolicyInternetAccessPolicyLogOnlyMode `json:"log_only_mode,omitempty"` + // The level at which Databricks and Databricks-managed compute can access + // the Internet. FULL_ACCESS: Databricks can access the Internet. No blocking rules + // will apply. RESTRICTED_ACCESS: Databricks can only access explicitly + // allowed internet and storage destinations, as well as UC connections and + // external locations. PRIVATE_ACCESS_ONLY (not used): Databricks can only + // access destinations via private link. + RestrictionMode EgressNetworkPolicyInternetAccessPolicyRestrictionMode `json:"restriction_mode,omitempty"` +} + +// Users can specify accessible internet destinations when outbound access is +// restricted. We only support domain name (FQDN) destinations for the time +// being, though going forward we want to support host names and IP addresses. +type EgressNetworkPolicyInternetAccessPolicyInternetDestination struct { + Destination string `json:"destination,omitempty"` + // The filtering protocol used by the DP. For private and public preview, + // SEG will only support TCP filtering (i.e. DNS based filtering, filtering + // by destination IP address), so protocol will be set to TCP by default and + // hidden from the user. In the future, users may be able to select HTTP + // filtering (i.e. SNI based filtering, filtering by FQDN). + Protocol EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol `json:"protocol,omitempty"` + + Type EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EgressNetworkPolicyInternetAccessPolicyInternetDestination) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EgressNetworkPolicyInternetAccessPolicyInternetDestination) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The filtering protocol used by the DP. For private and public preview, SEG +// will only support TCP filtering (i.e. DNS based filtering, filtering by +// destination IP address), so protocol will be set to TCP by default and hidden +// from the user. In the future, users may be able to select HTTP filtering +// (i.e. SNI based filtering, filtering by FQDN).
+type EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol string + +const EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocolTcp EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol = `TCP` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol) Set(v string) error { + switch v { + case `TCP`: + *f = EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "TCP"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationFilteringProtocol" +} + +type EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType string + +const EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationTypeFqdn EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType = `FQDN` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType) Set(v string) error { + switch v { + case `FQDN`: + *f = EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FQDN"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType" +} + +type EgressNetworkPolicyInternetAccessPolicyLogOnlyMode struct { + LogOnlyModeType EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType `json:"log_only_mode_type,omitempty"` + + Workloads []EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType `json:"workloads,omitempty"` +} + +type EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType string + +const EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeTypeAllServices EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType = `ALL_SERVICES` + +const EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeTypeSelectedServices EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType = `SELECTED_SERVICES` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType) Set(v 
string) error { + switch v { + case `ALL_SERVICES`, `SELECTED_SERVICES`: + *f = EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL_SERVICES", "SELECTED_SERVICES"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyLogOnlyModeLogOnlyModeType" +} + +// The values should match the list of workloads used in networkconfig.proto +type EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType string + +const EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadTypeDbsql EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType = `DBSQL` + +const EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadTypeMlServing EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType = `ML_SERVING` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType) Set(v string) error { + switch v { + case `DBSQL`, `ML_SERVING`: + *f = EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DBSQL", "ML_SERVING"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyLogOnlyModeWorkloadType" +} + +// The level at which Databricks and Databricks-managed compute can access the +// Internet. FULL_ACCESS: Databricks can access the Internet. No blocking rules +// will apply. RESTRICTED_ACCESS: Databricks can only access explicitly allowed +// internet and storage destinations, as well as UC connections and external +// locations. PRIVATE_ACCESS_ONLY (not used): Databricks can only access +// destinations via private link.
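+//
+// Like every enum in this package, the type below implements [pflag.Value]
+// through its String/Set/Type methods. A minimal sketch of Set-based
+// validation (editorial example, not generated code):
+//
+//	var mode EgressNetworkPolicyInternetAccessPolicyRestrictionMode
+//	if err := mode.Set("RESTRICTED_ACCESS"); err != nil {
+//		panic(err) // only FULL_ACCESS, PRIVATE_ACCESS_ONLY and RESTRICTED_ACCESS are accepted
+//	}
+//	err := mode.Set("SOMETHING_ELSE") // returns a descriptive error instead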
+type EgressNetworkPolicyInternetAccessPolicyRestrictionMode string + +const EgressNetworkPolicyInternetAccessPolicyRestrictionModeFullAccess EgressNetworkPolicyInternetAccessPolicyRestrictionMode = `FULL_ACCESS` + +const EgressNetworkPolicyInternetAccessPolicyRestrictionModePrivateAccessOnly EgressNetworkPolicyInternetAccessPolicyRestrictionMode = `PRIVATE_ACCESS_ONLY` + +const EgressNetworkPolicyInternetAccessPolicyRestrictionModeRestrictedAccess EgressNetworkPolicyInternetAccessPolicyRestrictionMode = `RESTRICTED_ACCESS` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyRestrictionMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressNetworkPolicyInternetAccessPolicyRestrictionMode) Set(v string) error { + switch v { + case `FULL_ACCESS`, `PRIVATE_ACCESS_ONLY`, `RESTRICTED_ACCESS`: + *f = EgressNetworkPolicyInternetAccessPolicyRestrictionMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FULL_ACCESS", "PRIVATE_ACCESS_ONLY", "RESTRICTED_ACCESS"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyRestrictionMode to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyRestrictionMode) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyRestrictionMode" +} + +// Users can specify accessible storage destinations. +type EgressNetworkPolicyInternetAccessPolicyStorageDestination struct { + AllowedPaths []string `json:"allowed_paths,omitempty"` + + AzureContainer string `json:"azure_container,omitempty"` + + AzureDnsZone string `json:"azure_dns_zone,omitempty"` + + AzureStorageAccount string `json:"azure_storage_account,omitempty"` + + AzureStorageService string `json:"azure_storage_service,omitempty"` + + BucketName string `json:"bucket_name,omitempty"` + + Region string `json:"region,omitempty"` + + Type EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EgressNetworkPolicyInternetAccessPolicyStorageDestination) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EgressNetworkPolicyInternetAccessPolicyStorageDestination) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType string + +const EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationTypeAwsS3 EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType = `AWS_S3` + +const EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationTypeAzureStorage EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType = `AZURE_STORAGE` + +const EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationTypeCloudflareR2 EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType = `CLOUDFLARE_R2` + +const EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationTypeGoogleCloudStorage EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType = `GOOGLE_CLOUD_STORAGE` + +// String representation for [fmt.Print] +func (f *EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f 
*EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType) Set(v string) error { + switch v { + case `AWS_S3`, `AZURE_STORAGE`, `CLOUDFLARE_R2`, `GOOGLE_CLOUD_STORAGE`: + *f = EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AWS_S3", "AZURE_STORAGE", "CLOUDFLARE_R2", "GOOGLE_CLOUD_STORAGE"`, v) + } +} + +// Type always returns EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType to satisfy [pflag.Value] interface +func (f *EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType) Type() string { + return "EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType" +} + +// Get an asset +type GetCleanRoomAssetRequest struct { + // The fully qualified name of the asset; it is the same as the name field + // in CleanRoomAsset. + AssetFullName string `json:"-" url:"-"` + // The type of the asset. + AssetType CleanRoomAssetAssetType `json:"-" url:"-"` + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` +} + +// Get a clean room +type GetCleanRoomRequest struct { + Name string `json:"-" url:"-"` +} + +// List assets +type ListCleanRoomAssetsRequest struct { + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomAssetsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomAssetsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListCleanRoomAssetsResponse struct { + // Assets in the clean room. + Assets []CleanRoomAsset `json:"assets,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. page_token should be set to this value for the next request + // (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomAssetsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomAssetsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List notebook task runs +type ListCleanRoomNotebookTaskRunsRequest struct { + // Name of the clean room. + CleanRoomName string `json:"-" url:"-"` + // Notebook name + NotebookName string `json:"-" url:"notebook_name,omitempty"` + // The maximum number of task runs to return + PageSize int `json:"-" url:"page_size,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomNotebookTaskRunsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomNotebookTaskRunsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListCleanRoomNotebookTaskRunsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. page_token should be set to this value for the next request + // (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // Notebook task runs of the clean room.
+ Runs []CleanRoomNotebookTaskRun `json:"runs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomNotebookTaskRunsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomNotebookTaskRunsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List clean rooms +type ListCleanRoomsRequest struct { + // Maximum number of clean rooms to return (i.e., the page length). Defaults + // to 100. + PageSize int `json:"-" url:"page_size,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListCleanRoomsResponse struct { + CleanRooms []CleanRoom `json:"clean_rooms,omitempty"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. page_token should be set to this value for the next request + // (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCleanRoomsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCleanRoomsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PartitionSpecificationPartition struct { + // An array of partition values. + Values []PartitionValue `json:"values,omitempty"` +} + +type PartitionValue struct { + // The name of the partition column. + Name string `json:"name,omitempty"` + // The operator to apply for the value. + Op PartitionValueOp `json:"op,omitempty"` + // The key of a Delta Sharing recipient's property. For example + // "databricks-account-id". When this field is set, field `value` cannot be + // set. + RecipientPropertyKey string `json:"recipient_property_key,omitempty"` + // The value of the partition column. When this value is not set, it means + // the `null` value. When this field is set, field `recipient_property_key` + // cannot be set. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PartitionValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PartitionValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PartitionValueOp string + +const PartitionValueOpEqual PartitionValueOp = `EQUAL` + +const PartitionValueOpLike PartitionValueOp = `LIKE` + +// String representation for [fmt.Print] +func (f *PartitionValueOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PartitionValueOp) Set(v string) error { + switch v { + case `EQUAL`, `LIKE`: + *f = PartitionValueOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "LIKE"`, v) + } +} + +// Type always returns PartitionValueOp to satisfy [pflag.Value] interface +func (f *PartitionValueOp) Type() string { + return "PartitionValueOp" +} + +// Update an asset +type UpdateCleanRoomAssetRequest struct { + // Metadata of the clean room asset + Asset *CleanRoomAsset `json:"asset,omitempty"` + // The type of the asset. + AssetType CleanRoomAssetAssetType `json:"-" url:"-"` + // Name of the clean room.
+ CleanRoomName string `json:"-" url:"-"` + // A fully qualified name that uniquely identifies the asset within the + // clean room. This is also the name displayed in the clean room UI. + // + // For UC securable assets (tables, volumes, etc.), the format is + // *shared_catalog*.*shared_schema*.*asset_name* + // + // For notebooks, the name is the notebook file name. + Name string `json:"-" url:"-"` +} + +type UpdateCleanRoomRequest struct { + CleanRoom *CleanRoom `json:"clean_room,omitempty"` + // Name of the clean room. + Name string `json:"-" url:"-"` +} diff --git a/compute/v2preview/api.go b/compute/v2preview/api.go new file mode 100755 index 000000000..f5d3fc7ba --- /dev/null +++ b/compute/v2preview/api.go @@ -0,0 +1,1501 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Cluster Policies Preview, Clusters Preview, Command Execution Preview, Global Init Scripts Preview, Instance Pools Preview, Instance Profiles Preview, Libraries Preview, Policy Compliance For Clusters Preview, Policy Families Preview, etc. +package computepreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type ClusterPoliciesPreviewInterface interface { + + // Create a new policy. + // + // Creates a new policy with prescribed settings. + Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error) + + // Delete a cluster policy. + // + // Delete a policy for a cluster. Clusters governed by this policy can still + // run, but cannot be edited. + Delete(ctx context.Context, request DeletePolicy) error + + // Delete a cluster policy. + // + // Delete a policy for a cluster. Clusters governed by this policy can still + // run, but cannot be edited. + DeleteByPolicyId(ctx context.Context, policyId string) error + + // Update a cluster policy. + // + // Update an existing policy for a cluster. This operation may make some clusters + // governed by the previous policy invalid. + Edit(ctx context.Context, request EditPolicy) error + + // Get a cluster policy. + // + // Get a cluster policy entity. Creation and editing are available to admins + // only. + Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error) + + // Get a cluster policy. + // + // Get a cluster policy entity. Creation and editing are available to admins + // only. + GetByPolicyId(ctx context.Context, policyId string) (*Policy, error) + + // Get cluster policy permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error) + + // Get cluster policy permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error) + + // Get cluster policy permissions. + // + // Gets the permissions of a cluster policy. Cluster policies can inherit + // permissions from their root object. + GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) + + // Get cluster policy permissions. + // + // Gets the permissions of a cluster policy. Cluster policies can inherit + // permissions from their root object.
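+ //
+ // Hedged usage sketch (editorial; assumes ctx is a context.Context and c is
+ // a configured *client.DatabricksClient; the policy ID is a placeholder):
+ //
+ //	api := NewClusterPoliciesPreview(c)
+ //	perms, err := api.GetPermissionsByClusterPolicyId(ctx, "0123-456789-abcdef")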
+ GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error) + + // List cluster policies. + // + // Returns a list of policies accessible by the requesting user. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] + + // List cluster policies. + // + // Returns a list of policies accessible by the requesting user. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) + + // PolicyNameToPolicyIdMap calls [ClusterPoliciesPreviewAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. + // + // Returns an error if there's more than one [Policy] with the same .Name. + // + // Note: All [Policy] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error) + + // GetByName calls [ClusterPoliciesPreviewAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. + // + // Returns an error if there's more than one [Policy] with the same .Name. + // + // Note: All [Policy] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*Policy, error) + + // Set cluster policy permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) + + // Update cluster policy permissions. + // + // Updates the permissions on a cluster policy. Cluster policies can inherit + // permissions from their root object. + UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) +} + +func NewClusterPoliciesPreview(client *client.DatabricksClient) *ClusterPoliciesPreviewAPI { + return &ClusterPoliciesPreviewAPI{ + clusterPoliciesPreviewImpl: clusterPoliciesPreviewImpl{ + client: client, + }, + } +} + +// You can use cluster policies to control users' ability to configure clusters +// based on a set of rules. These rules specify which attributes or attribute +// values can be used during cluster creation. Cluster policies have ACLs that +// limit their use to specific users and groups. +// +// With cluster policies, you can: - Auto-install cluster libraries on the next +// restart by listing them in the policy's "libraries" field (Public Preview). - +// Limit users to creating clusters with the prescribed settings. - Simplify the +// user interface, enabling more users to create clusters, by fixing and hiding +// some fields. - Manage costs by setting limits on attributes that impact the +// hourly rate. +// +// Cluster policy permissions limit which policies a user can select in the +// Policy drop-down when the user creates a cluster: - A user who has +// unrestricted cluster create permission can select the Unrestricted policy and +// create fully-configurable clusters. 
- A user who has both unrestricted +// cluster create permission and access to cluster policies can select the +// Unrestricted policy and policies they have access to. - A user who has +// access only to cluster policies can select the policies they have access to. +// +// If no policies exist in the workspace, the Policy drop-down doesn't appear. +// Only admin users can create, edit, and delete policies. Admin users also have +// access to all policies. +type ClusterPoliciesPreviewAPI struct { + clusterPoliciesPreviewImpl +} + +// Delete a cluster policy. +// +// Delete a policy for a cluster. Clusters governed by this policy can still +// run, but cannot be edited. +func (a *ClusterPoliciesPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.clusterPoliciesPreviewImpl.Delete(ctx, DeletePolicy{ + PolicyId: policyId, + }) +} + +// Get a cluster policy. +// +// Get a cluster policy entity. Creation and editing are available to admins +// only. +func (a *ClusterPoliciesPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*Policy, error) { + return a.clusterPoliciesPreviewImpl.Get(ctx, GetClusterPolicyRequest{ + PolicyId: policyId, + }) +} + +// Get cluster policy permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *ClusterPoliciesPreviewAPI) GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error) { + return a.clusterPoliciesPreviewImpl.GetPermissionLevels(ctx, GetClusterPolicyPermissionLevelsRequest{ + ClusterPolicyId: clusterPolicyId, + }) +} + +// Get cluster policy permissions. +// +// Gets the permissions of a cluster policy. Cluster policies can inherit +// permissions from their root object. +func (a *ClusterPoliciesPreviewAPI) GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error) { + return a.clusterPoliciesPreviewImpl.GetPermissions(ctx, GetClusterPolicyPermissionsRequest{ + ClusterPolicyId: clusterPolicyId, + }) +} + +// PolicyNameToPolicyIdMap calls [ClusterPoliciesPreviewAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. +// +// Returns an error if there's more than one [Policy] with the same .Name. +// +// Note: All [Policy] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ClusterPoliciesPreviewAPI) PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.PolicyId + } + return mapping, nil +} + +// GetByName calls [ClusterPoliciesPreviewAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. +// +// Returns an error if there's more than one [Policy] with the same .Name. +// +// Note: All [Policy] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator.
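+//
+// Hedged usage sketch (editorial; assumes ctx is a context.Context and c is a
+// configured *client.DatabricksClient; the policy name is a placeholder):
+//
+//	api := NewClusterPoliciesPreview(c)
+//	policy, err := api.GetByName(ctx, "Shared Compute")
+//	if err != nil {
+//		// fails when no policy, or more than one policy, has this name
+//	}
+//	_ = policy.PolicyId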
+func (a *ClusterPoliciesPreviewAPI) GetByName(ctx context.Context, name string) (*Policy, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListClusterPoliciesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Policy{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Policy named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Policy named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ClustersPreviewInterface interface { + + // Change cluster owner. + // + // Change the owner of the cluster. You must be an admin and the cluster must be + // terminated to perform this operation. The service principal application ID + // can be supplied as an argument to `owner_username`. + ChangeOwner(ctx context.Context, request ChangeClusterOwner) error + + // Create new cluster. + // + // Creates a new Spark cluster. This method will acquire new instances from the + // cloud provider if necessary. Note: Databricks may not be able to acquire some + // of the requested nodes, due to cloud provider limitations (account limits, + // spot price, etc.) or transient network issues. + // + // If Databricks acquires at least 85% of the requested on-demand nodes, cluster + // creation will succeed. Otherwise the cluster will terminate with an + // informative error message. + // + // Rather than authoring the cluster's JSON definition from scratch, Databricks + // recommends filling out the [create compute UI] and then copying the generated + // JSON definition from the UI. + // + // [create compute UI]: https://docs.databricks.com/compute/configure.html + Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) + + // Terminate cluster. + // + // Terminates the Spark cluster with the specified ID. The cluster is removed + // asynchronously. Once the termination has completed, the cluster will be in a + // `TERMINATED` state. If the cluster is already in a `TERMINATING` or + // `TERMINATED` state, nothing will happen. + Delete(ctx context.Context, request DeleteCluster) error + + // Terminate cluster. + // + // Terminates the Spark cluster with the specified ID. The cluster is removed + // asynchronously. Once the termination has completed, the cluster will be in a + // `TERMINATED` state. If the cluster is already in a `TERMINATING` or + // `TERMINATED` state, nothing will happen. + DeleteByClusterId(ctx context.Context, clusterId string) error + + // Update cluster configuration. + // + // Updates the configuration of a cluster to match the provided attributes and + // size. A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state. + // + // If a cluster is updated while in a `RUNNING` state, it will be restarted so + // that the new attributes can take effect. + // + // If a cluster is updated while in a `TERMINATED` state, it will remain + // `TERMINATED`. The next time it is started using the `clusters/start` API, the + // new attributes will take effect. Any attempt to update a cluster in any other + // state will be rejected with an `INVALID_STATE` error code. + // + // Clusters created by the Databricks Jobs service cannot be edited. + Edit(ctx context.Context, request EditCluster) error + + // List cluster activity events. 
+ // + // Retrieves a list of events about the activity of a cluster. This API is + // paginated. If there are more events to read, the response includes all the + // parameters necessary to request the next page of events. + // + // This method is generated by Databricks SDK Code Generator. + Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] + + // List cluster activity events. + // + // Retrieves a list of events about the activity of a cluster. This API is + // paginated. If there are more events to read, the response includes all the + // parameters necessary to request the next page of events. + // + // This method is generated by Databricks SDK Code Generator. + EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) + + // Get cluster info. + // + // Retrieves the information for a cluster given its identifier. Clusters can be + // described while they are running, or up to 60 days after they are terminated. + Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error) + + // Get cluster info. + // + // Retrieves the information for a cluster given its identifier. Clusters can be + // described while they are running, or up to 60 days after they are terminated. + GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error) + + // Get cluster permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error) + + // Get cluster permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error) + + // Get cluster permissions. + // + // Gets the permissions of a cluster. Clusters can inherit permissions from + // their root object. + GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error) + + // Get cluster permissions. + // + // Gets the permissions of a cluster. Clusters can inherit permissions from + // their root object. + GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error) + + // List clusters. + // + // Return information about all pinned and active clusters, and all clusters + // terminated within the last 30 days. Clusters terminated prior to this period + // are not included. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] + + // List clusters. + // + // Return information about all pinned and active clusters, and all clusters + // terminated within the last 30 days. Clusters terminated prior to this period + // are not included. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) + + // ClusterDetailsClusterNameToClusterIdMap calls [ClustersPreviewAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. + // + // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. + // + // Note: All [ClusterDetails] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator.
+ ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error) + + // GetByClusterName calls [ClustersPreviewAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. + + // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. + + // Note: All [ClusterDetails] instances are loaded into memory before returning matching by name. + + // This method is generated by Databricks SDK Code Generator. + GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error) + + // List node types. + + // Returns a list of supported Spark node types. These node types can be used to + // launch a cluster. + ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error) + + // List availability zones. + + // Returns a list of availability zones in which clusters can be created (for + // example, us-west-2a). These zones can be used to launch a cluster. + ListZones(ctx context.Context) (*ListAvailableZonesResponse, error) + + // Permanently delete cluster. + + // Permanently deletes a Spark cluster. This cluster is terminated and resources + // are asynchronously removed. + + // In addition, users will no longer see permanently deleted clusters in the + // cluster list, and API users can no longer perform any action on permanently + // deleted clusters. + PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error + + // Permanently delete cluster. + + // Permanently deletes a Spark cluster. This cluster is terminated and resources + // are asynchronously removed. + + // In addition, users will no longer see permanently deleted clusters in the + // cluster list, and API users can no longer perform any action on permanently + // deleted clusters. + PermanentDeleteByClusterId(ctx context.Context, clusterId string) error + + // Pin cluster. + + // Pinning a cluster ensures that the cluster will always be returned by the + // ListClusters API. Pinning a cluster that is already pinned will have no + // effect. This API can only be called by workspace admins. + Pin(ctx context.Context, request PinCluster) error + + // Pin cluster. + + // Pinning a cluster ensures that the cluster will always be returned by the + // ListClusters API. Pinning a cluster that is already pinned will have no + // effect. This API can only be called by workspace admins. + PinByClusterId(ctx context.Context, clusterId string) error + + // Resize cluster. + + // Resizes a cluster to have a desired number of workers. This will fail unless + // the cluster is in a `RUNNING` state. + Resize(ctx context.Context, request ResizeCluster) error + + // Restart cluster. + + // Restarts a Spark cluster with the supplied ID. If the cluster is not + // currently in a `RUNNING` state, nothing will happen. + Restart(ctx context.Context, request RestartCluster) error + + // Set cluster permissions. + + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) + + // List available Spark versions. + + // Returns the list of available Spark versions. These versions can be used to + // launch a cluster. + SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error) + + // Start terminated cluster.
+ // + // Starts a terminated Spark cluster with the supplied ID. This works similarly to + // `createCluster` except: + // + // * The previous cluster id and attributes are preserved. * The cluster starts + // with the last specified cluster size. * If the previous cluster was an + // autoscaling cluster, the current cluster starts with the minimum number of + // nodes. * If the cluster is not currently in a `TERMINATED` state, nothing + // will happen. * Clusters launched to run a job cannot be started. + Start(ctx context.Context, request StartCluster) error + + // Start terminated cluster. + // + // Starts a terminated Spark cluster with the supplied ID. This works similarly to + // `createCluster` except: + // + // * The previous cluster id and attributes are preserved. * The cluster starts + // with the last specified cluster size. * If the previous cluster was an + // autoscaling cluster, the current cluster starts with the minimum number of + // nodes. * If the cluster is not currently in a `TERMINATED` state, nothing + // will happen. * Clusters launched to run a job cannot be started. + StartByClusterId(ctx context.Context, clusterId string) error + + // Unpin cluster. + // + // Unpinning a cluster will allow the cluster to eventually be removed from the + // ListClusters API. Unpinning a cluster that is not pinned will have no effect. + // This API can only be called by workspace admins. + Unpin(ctx context.Context, request UnpinCluster) error + + // Unpin cluster. + // + // Unpinning a cluster will allow the cluster to eventually be removed from the + // ListClusters API. Unpinning a cluster that is not pinned will have no effect. + // This API can only be called by workspace admins. + UnpinByClusterId(ctx context.Context, clusterId string) error + + // Update cluster configuration (partial). + // + // Updates the configuration of a cluster to match the partial set of attributes + // and size. Denote which fields to update using the `update_mask` field in the + // request body. A cluster can be updated if it is in a `RUNNING` or + // `TERMINATED` state. If a cluster is updated while in a `RUNNING` state, it + // will be restarted so that the new attributes can take effect. If a cluster is + // updated while in a `TERMINATED` state, it will remain `TERMINATED`. The + // updated attributes will take effect the next time the cluster is started + // using the `clusters/start` API. Attempts to update a cluster in any other + // state will be rejected with an `INVALID_STATE` error code. Clusters created + // by the Databricks Jobs service cannot be updated. + Update(ctx context.Context, request UpdateCluster) error + + // Update cluster permissions. + // + // Updates the permissions on a cluster. Clusters can inherit permissions from + // their root object. + UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) +} + +func NewClustersPreview(client *client.DatabricksClient) *ClustersPreviewAPI { + return &ClustersPreviewAPI{ + clustersPreviewImpl: clustersPreviewImpl{ + client: client, + }, + } +} + +// The Clusters API allows you to create, start, edit, list, terminate, and +// delete clusters. +// +// Databricks maps cluster node instance types to compute units known as DBUs. +// See the instance type pricing page for a list of the supported instance types +// and their corresponding DBUs.
+// +// A Databricks cluster is a set of computation resources and configurations on +// which you run data engineering, data science, and data analytics workloads, +// such as production ETL pipelines, streaming analytics, ad-hoc analytics, and +// machine learning. +// +// You run these workloads as a set of commands in a notebook or as an automated +// job. Databricks makes a distinction between all-purpose clusters and job +// clusters. You use all-purpose clusters to analyze data collaboratively using +// interactive notebooks. You use job clusters to run fast and robust automated +// jobs. +// +// You can create an all-purpose cluster using the UI, CLI, or REST API. You can +// manually terminate and restart an all-purpose cluster. Multiple users can +// share such clusters to do collaborative interactive analysis. +// +// IMPORTANT: Databricks retains cluster configuration information for +// terminated clusters for 30 days. To keep an all-purpose cluster configuration +// even after it has been terminated for more than 30 days, an administrator can +// pin a cluster to the cluster list. +type ClustersPreviewAPI struct { + clustersPreviewImpl +} + +// Terminate cluster. +// +// Terminates the Spark cluster with the specified ID. The cluster is removed +// asynchronously. Once the termination has completed, the cluster will be in a +// `TERMINATED` state. If the cluster is already in a `TERMINATING` or +// `TERMINATED` state, nothing will happen. +func (a *ClustersPreviewAPI) DeleteByClusterId(ctx context.Context, clusterId string) error { + return a.clustersPreviewImpl.Delete(ctx, DeleteCluster{ + ClusterId: clusterId, + }) +} + +// Get cluster info. +// +// Retrieves the information for a cluster given its identifier. Clusters can be +// described while they are running, or up to 60 days after they are terminated. +func (a *ClustersPreviewAPI) GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error) { + return a.clustersPreviewImpl.Get(ctx, GetClusterRequest{ + ClusterId: clusterId, + }) +} + +// Get cluster permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *ClustersPreviewAPI) GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error) { + return a.clustersPreviewImpl.GetPermissionLevels(ctx, GetClusterPermissionLevelsRequest{ + ClusterId: clusterId, + }) +} + +// Get cluster permissions. +// +// Gets the permissions of a cluster. Clusters can inherit permissions from +// their root object. +func (a *ClustersPreviewAPI) GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error) { + return a.clustersPreviewImpl.GetPermissions(ctx, GetClusterPermissionsRequest{ + ClusterId: clusterId, + }) +} + +// ClusterDetailsClusterNameToClusterIdMap calls [ClustersPreviewAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. +// +// Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. +// +// Note: All [ClusterDetails] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
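+//
+// Hedged usage sketch (editorial; a is a *ClustersPreviewAPI built with
+// NewClustersPreview and the cluster name is a placeholder):
+//
+//	byName, err := a.ClusterDetailsClusterNameToClusterIdMap(ctx, ListClustersRequest{})
+//	if err != nil {
+//		// duplicate cluster names also surface as an error here
+//	}
+//	clusterId := byName["nightly-etl"]
+//	_ = clusterId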
+func (a *ClustersPreviewAPI) ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.ClusterName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .ClusterName: %s", key) + } + mapping[key] = v.ClusterId + } + return mapping, nil +} + +// GetByClusterName calls [ClustersPreviewAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. +// +// Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. +// +// Note: All [ClusterDetails] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ClustersPreviewAPI) GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListClustersRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ClusterDetails{} + for _, v := range result { + key := v.ClusterName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ClusterDetails named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ClusterDetails named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Permanently delete cluster. +// +// Permanently deletes a Spark cluster. This cluster is terminated and resources +// are asynchronously removed. +// +// In addition, users will no longer see permanently deleted clusters in the +// cluster list, and API users can no longer perform any action on permanently +// deleted clusters. +func (a *ClustersPreviewAPI) PermanentDeleteByClusterId(ctx context.Context, clusterId string) error { + return a.clustersPreviewImpl.PermanentDelete(ctx, PermanentDeleteCluster{ + ClusterId: clusterId, + }) +} + +// Pin cluster. +// +// Pinning a cluster ensures that the cluster will always be returned by the +// ListClusters API. Pinning a cluster that is already pinned will have no +// effect. This API can only be called by workspace admins. +func (a *ClustersPreviewAPI) PinByClusterId(ctx context.Context, clusterId string) error { + return a.clustersPreviewImpl.Pin(ctx, PinCluster{ + ClusterId: clusterId, + }) +} + +// Start terminated cluster. +// +// Starts a terminated Spark cluster with the supplied ID. This works similarly to +// `createCluster` except: +// +// * The previous cluster id and attributes are preserved. * The cluster starts +// with the last specified cluster size. * If the previous cluster was an +// autoscaling cluster, the current cluster starts with the minimum number of +// nodes. * If the cluster is not currently in a `TERMINATED` state, nothing +// will happen. * Clusters launched to run a job cannot be started. +func (a *ClustersPreviewAPI) StartByClusterId(ctx context.Context, clusterId string) error { + return a.clustersPreviewImpl.Start(ctx, StartCluster{ + ClusterId: clusterId, + }) +} + +// Unpin cluster. +// +// Unpinning a cluster will allow the cluster to eventually be removed from the +// ListClusters API. Unpinning a cluster that is not pinned will have no effect.
+// This API can only be called by workspace admins. +func (a *ClustersPreviewAPI) UnpinByClusterId(ctx context.Context, clusterId string) error { + return a.clustersPreviewImpl.Unpin(ctx, UnpinCluster{ + ClusterId: clusterId, + }) +} + +type CommandExecutionPreviewInterface interface { + + // Cancel a command. + // + // Cancels a currently running command within an execution context. + // + // The command ID is obtained from a prior successful call to __execute__. + Cancel(ctx context.Context, request CancelCommand) error + + // Get command info. + // + // Gets the status of and, if available, the results from a currently executing + // command. + // + // The command ID is obtained from a prior successful call to __execute__. + CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error) + + // Get status. + // + // Gets the status for an execution context. + ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error) + + // Create an execution context. + // + // Creates an execution context for running cluster commands. + // + // If successful, this method returns the ID of the new execution context. + Create(ctx context.Context, request CreateContext) (*Created, error) + + // Delete an execution context. + // + // Deletes an execution context. + Destroy(ctx context.Context, request DestroyContext) error + + // Run a command. + // + // Runs a cluster command in the given execution context, using the provided + // language. + // + // If successful, it returns an ID for tracking the status of the command's + // execution. + Execute(ctx context.Context, request Command) (*Created, error) +} + +func NewCommandExecutionPreview(client *client.DatabricksClient) *CommandExecutionPreviewAPI { + return &CommandExecutionPreviewAPI{ + commandExecutionPreviewImpl: commandExecutionPreviewImpl{ + client: client, + }, + } +} + +// This API allows execution of Python, Scala, SQL, or R commands on running +// Databricks Clusters. This API only supports (classic) all-purpose clusters. +// Serverless compute is not supported. +type CommandExecutionPreviewAPI struct { + commandExecutionPreviewImpl +} + +type GlobalInitScriptsPreviewInterface interface { + + // Create init script. + // + // Creates a new global init script in this workspace. + Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error) + + // Delete init script. + // + // Deletes a global init script. + Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error + + // Delete init script. + // + // Deletes a global init script. + DeleteByScriptId(ctx context.Context, scriptId string) error + + // Get an init script. + // + // Gets all the details of a script, including its Base64-encoded contents. + Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error) + + // Get an init script. + // + // Gets all the details of a script, including its Base64-encoded contents. + GetByScriptId(ctx context.Context, scriptId string) (*GlobalInitScriptDetailsWithContent, error) + + // Get init scripts. + // + // Get a list of all global init scripts for this workspace. This returns all + // properties for each script but **not** the script contents. To retrieve the + // contents of a script, use the [get a global init + // script](:method:globalinitscripts/get) operation. + // + // This method is generated by Databricks SDK Code Generator. 
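+ //
+ // Hedged sketch of consuming the iterator (editorial; assumes the
+ // [listing.Iterator] contract of HasNext/Next used across this SDK):
+ //
+ //	it := api.List(ctx)
+ //	for it.HasNext(ctx) {
+ //		script, err := it.Next(ctx)
+ //		if err != nil {
+ //			break
+ //		}
+ //		fmt.Println(script.ScriptId, script.Name)
+ //	}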
+ List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] + + // Get init scripts. + + // Get a list of all global init scripts for this workspace. This returns all + // properties for each script but **not** the script contents. To retrieve the + // contents of a script, use the [get a global init + // script](:method:globalinitscripts/get) operation. + + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) + + // GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsPreviewAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. + + // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. + + // Note: All [GlobalInitScriptDetails] instances are loaded into memory before creating a map. + + // This method is generated by Databricks SDK Code Generator. + GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error) + + // GetByName calls [GlobalInitScriptsPreviewAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. + + // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. + + // Note: All [GlobalInitScriptDetails] instances are loaded into memory before returning matching by name. + + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*GlobalInitScriptDetails, error) + + // Update init script. + + // Updates a global init script, specifying only the fields to change. All + // fields are optional. Unspecified fields retain their current value. + Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error +} + +func NewGlobalInitScriptsPreview(client *client.DatabricksClient) *GlobalInitScriptsPreviewAPI { + return &GlobalInitScriptsPreviewAPI{ + globalInitScriptsPreviewImpl: globalInitScriptsPreviewImpl{ + client: client, + }, + } +} + +// The Global Init Scripts API enables Workspace administrators to configure +// global initialization scripts for their workspace. These scripts run on every +// node in every cluster in the workspace. +// +// **Important:** Existing clusters must be restarted to pick up any changes +// made to global init scripts. Global init scripts are run in order. If the +// init script returns with a bad exit code, the Apache Spark container fails to +// launch and init scripts at later positions are skipped. If enough containers +// fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error +// code. +type GlobalInitScriptsPreviewAPI struct { + globalInitScriptsPreviewImpl +} + +// Delete init script. +// +// Deletes a global init script. +func (a *GlobalInitScriptsPreviewAPI) DeleteByScriptId(ctx context.Context, scriptId string) error { + return a.globalInitScriptsPreviewImpl.Delete(ctx, DeleteGlobalInitScriptRequest{ + ScriptId: scriptId, + }) +} + +// Get an init script. +// +// Gets all the details of a script, including its Base64-encoded contents.
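+//
+// Hedged sketch of decoding the contents (editorial; assumes the content field
+// on GlobalInitScriptDetailsWithContent is named Script and is base64-encoded
+// as described above):
+//
+//	details, err := a.GetByScriptId(ctx, scriptId)
+//	if err == nil {
+//		raw, decErr := base64.StdEncoding.DecodeString(details.Script)
+//		if decErr == nil {
+//			fmt.Println(string(raw))
+//		}
+//	}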
+func (a *GlobalInitScriptsPreviewAPI) GetByScriptId(ctx context.Context, scriptId string) (*GlobalInitScriptDetailsWithContent, error) { + return a.globalInitScriptsPreviewImpl.Get(ctx, GetGlobalInitScriptRequest{ + ScriptId: scriptId, + }) +} + +// GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsPreviewAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. +// +// Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. +// +// Note: All [GlobalInitScriptDetails] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *GlobalInitScriptsPreviewAPI) GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.ScriptId + } + return mapping, nil +} + +// GetByName calls [GlobalInitScriptsPreviewAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. +// +// Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. +// +// Note: All [GlobalInitScriptDetails] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *GlobalInitScriptsPreviewAPI) GetByName(ctx context.Context, name string) (*GlobalInitScriptDetails, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]GlobalInitScriptDetails{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("GlobalInitScriptDetails named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of GlobalInitScriptDetails named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type InstancePoolsPreviewInterface interface { + + // Create a new instance pool. + // + // Creates a new instance pool using idle and ready-to-use cloud instances. + Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error) + + // Delete an instance pool. + // + // Deletes the instance pool permanently. The idle instances in the pool are + // terminated asynchronously. + Delete(ctx context.Context, request DeleteInstancePool) error + + // Delete an instance pool. + // + // Deletes the instance pool permanently. The idle instances in the pool are + // terminated asynchronously. + DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error + + // Edit an existing instance pool. + // + // Modifies the configuration of an existing instance pool. + Edit(ctx context.Context, request EditInstancePool) error + + // Get instance pool information. + // + // Retrieve the information for an instance pool based on its identifier. + Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error) + + // Get instance pool information. + // + // Retrieve the information for an instance pool based on its identifier. 
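+ //
+ // Hedged usage sketch (editorial; the pool ID is a placeholder):
+ //
+ //	pool, err := api.GetByInstancePoolId(ctx, "0123-456789-pool12")
+ //	if err == nil {
+ //		fmt.Printf("%+v\n", pool)
+ //	}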
+ GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error) + + // Get instance pool permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error) + + // Get instance pool permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error) + + // Get instance pool permissions. + // + // Gets the permissions of an instance pool. Instance pools can inherit + // permissions from their root object. + GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error) + + // Get instance pool permissions. + // + // Gets the permissions of an instance pool. Instance pools can inherit + // permissions from their root object. + GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error) + + // List instance pool info. + // + // Gets a list of instance pools with their statistics. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[InstancePoolAndStats] + + // List instance pool info. + // + // Gets a list of instance pools with their statistics. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]InstancePoolAndStats, error) + + // InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsPreviewAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. + // + // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. + // + // Note: All [InstancePoolAndStats] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error) + + // GetByInstancePoolName calls [InstancePoolsPreviewAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. + // + // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. + // + // Note: All [InstancePoolAndStats] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error) + + // Set instance pool permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) + + // Update instance pool permissions. + // + // Updates the permissions on an instance pool. Instance pools can inherit + // permissions from their root object. 
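+ //
+ // An illustrative sketch; the access-control types and the permission-level
+ // constant are assumptions mirroring the non-preview compute API:
+ //
+ //	_, err := pools.UpdatePermissions(ctx, InstancePoolPermissionsRequest{
+ //		InstancePoolId: poolId,
+ //		AccessControlList: []InstancePoolAccessControlRequest{
+ //			{GroupName: "data-engineers", PermissionLevel: InstancePoolPermissionLevelCanAttachTo},
+ //		},
+ //	})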
+ UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error)
+}
+
+func NewInstancePoolsPreview(client *client.DatabricksClient) *InstancePoolsPreviewAPI {
+ return &InstancePoolsPreviewAPI{
+ instancePoolsPreviewImpl: instancePoolsPreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// The Instance Pools API is used to create, edit, delete, and list instance
+// pools backed by ready-to-use cloud instances, which reduces cluster start
+// and auto-scaling times.
+//
+// Databricks pools reduce cluster start and auto-scaling times by maintaining a
+// set of idle, ready-to-use instances. When a cluster is attached to a pool,
+// cluster nodes are created using the pool’s idle instances. If the pool has
+// no idle instances, the pool expands by allocating a new instance from the
+// instance provider in order to accommodate the cluster’s request. When a
+// cluster releases an instance, it returns to the pool and is free for another
+// cluster to use. Only clusters attached to a pool can use that pool’s idle
+// instances.
+//
+// You can specify a different pool for the driver node and worker nodes, or use
+// the same pool for both.
+//
+// Databricks does not charge DBUs while instances are idle in the pool.
+// Instance provider billing does apply. See pricing.
+type InstancePoolsPreviewAPI struct {
+ instancePoolsPreviewImpl
+}
+
+// Delete an instance pool.
+//
+// Deletes the instance pool permanently. The idle instances in the pool are
+// terminated asynchronously.
+func (a *InstancePoolsPreviewAPI) DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error {
+ return a.instancePoolsPreviewImpl.Delete(ctx, DeleteInstancePool{
+ InstancePoolId: instancePoolId,
+ })
+}
+
+// Get instance pool information.
+//
+// Retrieve the information for an instance pool based on its identifier.
+func (a *InstancePoolsPreviewAPI) GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error) {
+ return a.instancePoolsPreviewImpl.Get(ctx, GetInstancePoolRequest{
+ InstancePoolId: instancePoolId,
+ })
+}
+
+// Get instance pool permission levels.
+//
+// Gets the permission levels that a user can have on an object.
+func (a *InstancePoolsPreviewAPI) GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error) {
+ return a.instancePoolsPreviewImpl.GetPermissionLevels(ctx, GetInstancePoolPermissionLevelsRequest{
+ InstancePoolId: instancePoolId,
+ })
+}
+
+// Get instance pool permissions.
+//
+// Gets the permissions of an instance pool. Instance pools can inherit
+// permissions from their root object.
+func (a *InstancePoolsPreviewAPI) GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error) {
+ return a.instancePoolsPreviewImpl.GetPermissions(ctx, GetInstancePoolPermissionsRequest{
+ InstancePoolId: instancePoolId,
+ })
+}
+
+// InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsPreviewAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value.
+//
+// Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName.
+//
+// Note: All [InstancePoolAndStats] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
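+//
+// An illustrative sketch (the pool name is hypothetical; error handling is
+// abbreviated):
+//
+//	mapping, err := a.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx)
+//	// handle err
+//	poolId, ok := mapping["nightly-etl-pool"]
+//	// ok is false when no pool carries that name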
+func (a *InstancePoolsPreviewAPI) InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.InstancePoolName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .InstancePoolName: %s", key) + } + mapping[key] = v.InstancePoolId + } + return mapping, nil +} + +// GetByInstancePoolName calls [InstancePoolsPreviewAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. +// +// Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. +// +// Note: All [InstancePoolAndStats] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *InstancePoolsPreviewAPI) GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]InstancePoolAndStats{} + for _, v := range result { + key := v.InstancePoolName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("InstancePoolAndStats named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of InstancePoolAndStats named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type InstanceProfilesPreviewInterface interface { + + // Register an instance profile. + // + // In the UI, you can select the instance profile when launching clusters. This + // API is only available to admin users. + Add(ctx context.Context, request AddInstanceProfile) error + + // Edit an instance profile. + // + // The only supported field to change is the optional IAM role ARN associated + // with the instance profile. It is required to specify the IAM role ARN if both + // of the following are true: + // + // * Your role name and instance profile name do not match. The name is the part + // after the last slash in each ARN. * You want to use the instance profile with + // [Databricks SQL Serverless]. + // + // To understand where these fields are in the AWS console, see [Enable + // serverless SQL warehouses]. + // + // This API is only available to admin users. + // + // [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html + // [Enable serverless SQL warehouses]: https://docs.databricks.com/sql/admin/serverless.html + Edit(ctx context.Context, request InstanceProfile) error + + // List available instance profiles. + // + // List the instance profiles that the calling user can use to launch a cluster. + // + // This API is available to all users. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[InstanceProfile] + + // List available instance profiles. + // + // List the instance profiles that the calling user can use to launch a cluster. + // + // This API is available to all users. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]InstanceProfile, error) + + // Remove the instance profile. + // + // Remove the instance profile with the provided ARN. 
Existing clusters with
+ // this instance profile will continue to function.
+ //
+ // This API is only accessible to admin users.
+ Remove(ctx context.Context, request RemoveInstanceProfile) error
+
+ // Remove the instance profile.
+ //
+ // Remove the instance profile with the provided ARN. Existing clusters with
+ // this instance profile will continue to function.
+ //
+ // This API is only accessible to admin users.
+ RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error
+}
+
+func NewInstanceProfilesPreview(client *client.DatabricksClient) *InstanceProfilesPreviewAPI {
+ return &InstanceProfilesPreviewAPI{
+ instanceProfilesPreviewImpl: instanceProfilesPreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// The Instance Profiles API allows admins to add, list, and remove instance
+// profiles that users can launch clusters with. Regular users can list the
+// instance profiles available to them. See [Secure access to S3 buckets] using
+// instance profiles for more information.
+//
+// [Secure access to S3 buckets]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html
+type InstanceProfilesPreviewAPI struct {
+ instanceProfilesPreviewImpl
+}
+
+// Remove the instance profile.
+//
+// Remove the instance profile with the provided ARN. Existing clusters with
+// this instance profile will continue to function.
+//
+// This API is only accessible to admin users.
+func (a *InstanceProfilesPreviewAPI) RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error {
+ return a.instanceProfilesPreviewImpl.Remove(ctx, RemoveInstanceProfile{
+ InstanceProfileArn: instanceProfileArn,
+ })
+}
+
+type LibrariesPreviewInterface interface {
+
+ // Get all statuses.
+ //
+ // Get the status of all libraries on all clusters. A status is returned for all
+ // libraries installed on this cluster via the API or the libraries UI.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses]
+
+ // Get all statuses.
+ //
+ // Get the status of all libraries on all clusters. A status is returned for all
+ // libraries installed on this cluster via the API or the libraries UI.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error)
+
+ // Get status.
+ //
+ // Get the status of libraries on a cluster. A status is returned for all
+ // libraries installed on this cluster via the API or the libraries UI. The
+ // order of returned libraries is as follows: 1. Libraries set to be installed
+ // on this cluster, in the order that the libraries were added to the cluster,
+ // are returned first. 2. Libraries that were previously requested to be
+ // installed on this cluster but are now marked for removal, in no
+ // particular order, are returned last.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus]
+
+ // Get status.
+ //
+ // Get the status of libraries on a cluster. A status is returned for all
+ // libraries installed on this cluster via the API or the libraries UI. The
+ // order of returned libraries is as follows: 1. Libraries set to be installed
+ // on this cluster, in the order that the libraries were added to the cluster,
+ // are returned first. 2.
Libraries that were previously requested to be
+ // installed on this cluster but are now marked for removal, in no
+ // particular order, are returned last.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error)
+
+ // Get status.
+ //
+ // Get the status of libraries on a cluster. A status is returned for all
+ // libraries installed on this cluster via the API or the libraries UI. The
+ // order of returned libraries is as follows: 1. Libraries set to be installed
+ // on this cluster, in the order that the libraries were added to the cluster,
+ // are returned first. 2. Libraries that were previously requested to be
+ // installed on this cluster but are now marked for removal, in no
+ // particular order, are returned last.
+ ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error)
+
+ // Add a library.
+ //
+ // Add libraries to install on a cluster. The installation is asynchronous; it
+ // happens in the background after the completion of this request.
+ Install(ctx context.Context, request InstallLibraries) error
+
+ // Uninstall libraries.
+ //
+ // Set libraries to uninstall from a cluster. The libraries won't be uninstalled
+ // until the cluster is restarted. A request to uninstall a library that is not
+ // currently installed is ignored.
+ Uninstall(ctx context.Context, request UninstallLibraries) error
+}
+
+func NewLibrariesPreview(client *client.DatabricksClient) *LibrariesPreviewAPI {
+ return &LibrariesPreviewAPI{
+ librariesPreviewImpl: librariesPreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// The Libraries API allows you to install and uninstall libraries and get the
+// status of libraries on a cluster.
+//
+// To make third-party or custom code available to notebooks and jobs running on
+// your clusters, you can install a library. Libraries can be written in Python,
+// Java, Scala, and R. You can upload Python, Java, Scala and R libraries and
+// point to external packages in PyPI, Maven, and CRAN repositories.
+//
+// Cluster libraries can be used by all notebooks running on a cluster. You can
+// install a cluster library directly from a public repository such as PyPI or
+// Maven, using a previously installed workspace library, or using an init
+// script.
+//
+// When you uninstall a library from a cluster, the library is removed only when
+// you restart the cluster. Until you restart the cluster, the status of the
+// uninstalled library appears as Uninstall pending restart.
+type LibrariesPreviewAPI struct {
+ librariesPreviewImpl
+}
+
+// Get status.
+//
+// Get the status of libraries on a cluster. A status is returned for all
+// libraries installed on this cluster via the API or the libraries UI. The
+// order of returned libraries is as follows: 1. Libraries set to be installed
+// on this cluster, in the order that the libraries were added to the cluster,
+// are returned first. 2. Libraries that were previously requested to be
+// installed on this cluster but are now marked for removal, in no
+// particular order, are returned last.
+func (a *LibrariesPreviewAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) {
+ return a.librariesPreviewImpl.internalClusterStatus(ctx, ClusterStatus{
+ ClusterId: clusterId,
+ })
+}
+
+type PolicyComplianceForClustersPreviewInterface interface {
+
+ // Enforce cluster policy compliance.
+ // + // Updates a cluster to be compliant with the current version of its policy. A + // cluster can be updated if it is in a `RUNNING` or `TERMINATED` state. + // + // If a cluster is updated while in a `RUNNING` state, it will be restarted so + // that the new attributes can take effect. + // + // If a cluster is updated while in a `TERMINATED` state, it will remain + // `TERMINATED`. The next time the cluster is started, the new attributes will + // take effect. + // + // Clusters created by the Databricks Jobs, DLT, or Models services cannot be + // enforced by this API. Instead, use the "Enforce job policy compliance" API to + // enforce policy compliance on jobs. + EnforceCompliance(ctx context.Context, request EnforceClusterComplianceRequest) (*EnforceClusterComplianceResponse, error) + + // Get cluster policy compliance. + // + // Returns the policy compliance status of a cluster. Clusters could be out of + // compliance if their policy was updated after the cluster was last edited. + GetCompliance(ctx context.Context, request GetClusterComplianceRequest) (*GetClusterComplianceResponse, error) + + // Get cluster policy compliance. + // + // Returns the policy compliance status of a cluster. Clusters could be out of + // compliance if their policy was updated after the cluster was last edited. + GetComplianceByClusterId(ctx context.Context, clusterId string) (*GetClusterComplianceResponse, error) + + // List cluster policy compliance. + // + // Returns the policy compliance status of all clusters that use a given policy. + // Clusters could be out of compliance if their policy was updated after the + // cluster was last edited. + // + // This method is generated by Databricks SDK Code Generator. + ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] + + // List cluster policy compliance. + // + // Returns the policy compliance status of all clusters that use a given policy. + // Clusters could be out of compliance if their policy was updated after the + // cluster was last edited. + // + // This method is generated by Databricks SDK Code Generator. + ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) +} + +func NewPolicyComplianceForClustersPreview(client *client.DatabricksClient) *PolicyComplianceForClustersPreviewAPI { + return &PolicyComplianceForClustersPreviewAPI{ + policyComplianceForClustersPreviewImpl: policyComplianceForClustersPreviewImpl{ + client: client, + }, + } +} + +// The policy compliance APIs allow you to view and manage the policy compliance +// status of clusters in your workspace. +// +// A cluster is compliant with its policy if its configuration satisfies all its +// policy rules. Clusters could be out of compliance if their policy was updated +// after the cluster was last edited. +// +// The get and list compliance APIs allow you to view the policy compliance +// status of a cluster. The enforce compliance API allows you to update a +// cluster to be compliant with the current version of its policy. +type PolicyComplianceForClustersPreviewAPI struct { + policyComplianceForClustersPreviewImpl +} + +// Get cluster policy compliance. +// +// Returns the policy compliance status of a cluster. Clusters could be out of +// compliance if their policy was updated after the cluster was last edited. 
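+//
+// An illustrative sketch: check one cluster and enforce its policy when it has
+// drifted. The IsCompliant field and the request shape are assumptions
+// mirroring the non-preview compute API; error handling is abbreviated.
+//
+//	resp, err := a.GetComplianceByClusterId(ctx, clusterId)
+//	// handle err
+//	if !resp.IsCompliant {
+//		_, err = a.EnforceCompliance(ctx, EnforceClusterComplianceRequest{
+//			ClusterId: clusterId,
+//		})
+//	}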
+func (a *PolicyComplianceForClustersPreviewAPI) GetComplianceByClusterId(ctx context.Context, clusterId string) (*GetClusterComplianceResponse, error) {
+ return a.policyComplianceForClustersPreviewImpl.GetCompliance(ctx, GetClusterComplianceRequest{
+ ClusterId: clusterId,
+ })
+}
+
+type PolicyFamiliesPreviewInterface interface {
+
+ // Get policy family information.
+ //
+ // Retrieve the information for a policy family based on its identifier and
+ // version.
+ Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error)
+
+ // Get policy family information.
+ //
+ // Retrieve the information for a policy family based on its identifier and
+ // version.
+ GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error)
+
+ // List policy families.
+ //
+ // Returns the list of policy definition types available to use at their latest
+ // version. This API is paginated.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily]
+
+ // List policy families.
+ //
+ // Returns the list of policy definition types available to use at their latest
+ // version. This API is paginated.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error)
+}
+
+func NewPolicyFamiliesPreview(client *client.DatabricksClient) *PolicyFamiliesPreviewAPI {
+ return &PolicyFamiliesPreviewAPI{
+ policyFamiliesPreviewImpl: policyFamiliesPreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// View available policy families. A policy family contains a policy definition
+// providing best practices for configuring clusters for a particular use case.
+//
+// Databricks manages and provides policy families for several common cluster
+// use cases. You cannot create, edit, or delete policy families.
+//
+// Policy families cannot be used directly to create clusters. Instead, you
+// create cluster policies using a policy family. Cluster policies created using
+// a policy family inherit the policy family's policy definition.
+type PolicyFamiliesPreviewAPI struct {
+ policyFamiliesPreviewImpl
+}
+
+// Get policy family information.
+//
+// Retrieve the information for a policy family based on its identifier and
+// version.
+func (a *PolicyFamiliesPreviewAPI) GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error) {
+ return a.policyFamiliesPreviewImpl.Get(ctx, GetPolicyFamilyRequest{
+ PolicyFamilyId: policyFamilyId,
+ })
+}
diff --git a/compute/v2preview/client.go b/compute/v2preview/client.go
new file mode 100755
index 000000000..77a716429
--- /dev/null
+++ b/compute/v2preview/client.go
@@ -0,0 +1,317 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
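+
+// Every constructor in this file follows the same pattern: resolve the config,
+// reject account-level configs, build an ApiClient, and wrap it in the
+// corresponding service interface. An illustrative sketch, assuming workspace
+// host and token values (both hypothetical); error handling is abbreviated:
+//
+//	cfg := &config.Config{Host: "https://my-workspace.cloud.databricks.com", Token: "..."}
+//	clusters, err := NewClustersPreviewClient(cfg)
+//	// handle err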
+ +package computepreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type ClusterPoliciesPreviewClient struct { + ClusterPoliciesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewClusterPoliciesPreviewClient(cfg *config.Config) (*ClusterPoliciesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ClusterPoliciesPreviewClient{ + Config: cfg, + apiClient: apiClient, + ClusterPoliciesPreviewInterface: NewClusterPoliciesPreview(databricksClient), + }, nil +} + +type ClustersPreviewClient struct { + ClustersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewClustersPreviewClient(cfg *config.Config) (*ClustersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ClustersPreviewClient{ + Config: cfg, + apiClient: apiClient, + ClustersPreviewInterface: NewClustersPreview(databricksClient), + }, nil +} + +type CommandExecutionPreviewClient struct { + CommandExecutionPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCommandExecutionPreviewClient(cfg *config.Config) (*CommandExecutionPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CommandExecutionPreviewClient{ + Config: cfg, + apiClient: apiClient, + CommandExecutionPreviewInterface: NewCommandExecutionPreview(databricksClient), + }, nil +} + +type GlobalInitScriptsPreviewClient struct { + GlobalInitScriptsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewGlobalInitScriptsPreviewClient(cfg *config.Config) (*GlobalInitScriptsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + 
return nil, err + } + + return &GlobalInitScriptsPreviewClient{ + Config: cfg, + apiClient: apiClient, + GlobalInitScriptsPreviewInterface: NewGlobalInitScriptsPreview(databricksClient), + }, nil +} + +type InstancePoolsPreviewClient struct { + InstancePoolsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewInstancePoolsPreviewClient(cfg *config.Config) (*InstancePoolsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &InstancePoolsPreviewClient{ + Config: cfg, + apiClient: apiClient, + InstancePoolsPreviewInterface: NewInstancePoolsPreview(databricksClient), + }, nil +} + +type InstanceProfilesPreviewClient struct { + InstanceProfilesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewInstanceProfilesPreviewClient(cfg *config.Config) (*InstanceProfilesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &InstanceProfilesPreviewClient{ + Config: cfg, + apiClient: apiClient, + InstanceProfilesPreviewInterface: NewInstanceProfilesPreview(databricksClient), + }, nil +} + +type LibrariesPreviewClient struct { + LibrariesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewLibrariesPreviewClient(cfg *config.Config) (*LibrariesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &LibrariesPreviewClient{ + Config: cfg, + apiClient: apiClient, + LibrariesPreviewInterface: NewLibrariesPreview(databricksClient), + }, nil +} + +type PolicyComplianceForClustersPreviewClient struct { + PolicyComplianceForClustersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPolicyComplianceForClustersPreviewClient(cfg *config.Config) (*PolicyComplianceForClustersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, 
err + } + + return &PolicyComplianceForClustersPreviewClient{ + Config: cfg, + apiClient: apiClient, + PolicyComplianceForClustersPreviewInterface: NewPolicyComplianceForClustersPreview(databricksClient), + }, nil +} + +type PolicyFamiliesPreviewClient struct { + PolicyFamiliesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPolicyFamiliesPreviewClient(cfg *config.Config) (*PolicyFamiliesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PolicyFamiliesPreviewClient{ + Config: cfg, + apiClient: apiClient, + PolicyFamiliesPreviewInterface: NewPolicyFamiliesPreview(databricksClient), + }, nil +} diff --git a/compute/v2preview/impl.go b/compute/v2preview/impl.go new file mode 100755 index 000000000..5855c50d2 --- /dev/null +++ b/compute/v2preview/impl.go @@ -0,0 +1,1070 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package computepreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just ClusterPoliciesPreview API methods +type clusterPoliciesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *clusterPoliciesPreviewImpl) Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error) { + var createPolicyResponse CreatePolicyResponse + path := "/api/2.0preview/policies/clusters/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPolicyResponse) + return &createPolicyResponse, err +} + +func (a *clusterPoliciesPreviewImpl) Delete(ctx context.Context, request DeletePolicy) error { + var deletePolicyResponse DeletePolicyResponse + path := "/api/2.0preview/policies/clusters/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deletePolicyResponse) + return err +} + +func (a *clusterPoliciesPreviewImpl) Edit(ctx context.Context, request EditPolicy) error { + var editPolicyResponse EditPolicyResponse + path := "/api/2.0preview/policies/clusters/edit" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editPolicyResponse) + return err +} + +func (a *clusterPoliciesPreviewImpl) Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error) { + var policy Policy + path := "/api/2.0preview/policies/clusters/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = 
"application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &policy) + return &policy, err +} + +func (a *clusterPoliciesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error) { + var getClusterPolicyPermissionLevelsResponse GetClusterPolicyPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v/permissionLevels", request.ClusterPolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterPolicyPermissionLevelsResponse) + return &getClusterPolicyPermissionLevelsResponse, err +} + +func (a *clusterPoliciesPreviewImpl) GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { + var clusterPolicyPermissions ClusterPolicyPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterPolicyPermissions) + return &clusterPolicyPermissions, err +} + +// List cluster policies. +// +// Returns a list of policies accessible by the requesting user. +func (a *clusterPoliciesPreviewImpl) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] { + + getNextPage := func(ctx context.Context, req ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListPoliciesResponse) []Policy { + return resp.Policies + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List cluster policies. +// +// Returns a list of policies accessible by the requesting user. 
+func (a *clusterPoliciesPreviewImpl) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Policy](ctx, iterator) +} +func (a *clusterPoliciesPreviewImpl) internalList(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { + var listPoliciesResponse ListPoliciesResponse + path := "/api/2.0preview/policies/clusters/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPoliciesResponse) + return &listPoliciesResponse, err +} + +func (a *clusterPoliciesPreviewImpl) SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { + var clusterPolicyPermissions ClusterPolicyPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &clusterPolicyPermissions) + return &clusterPolicyPermissions, err +} + +func (a *clusterPoliciesPreviewImpl) UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { + var clusterPolicyPermissions ClusterPolicyPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &clusterPolicyPermissions) + return &clusterPolicyPermissions, err +} + +// unexported type that holds implementations of just ClustersPreview API methods +type clustersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *clustersPreviewImpl) ChangeOwner(ctx context.Context, request ChangeClusterOwner) error { + var changeClusterOwnerResponse ChangeClusterOwnerResponse + path := "/api/2.1preview/clusters/change-owner" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &changeClusterOwnerResponse) + return err +} + +func (a *clustersPreviewImpl) Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) { + var createClusterResponse CreateClusterResponse + path := "/api/2.1preview/clusters/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createClusterResponse) + return &createClusterResponse, err +} + +func (a *clustersPreviewImpl) Delete(ctx context.Context, request DeleteCluster) error { + var deleteClusterResponse DeleteClusterResponse + path := "/api/2.1preview/clusters/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, 
request, &deleteClusterResponse)
+ return err
+}
+
+func (a *clustersPreviewImpl) Edit(ctx context.Context, request EditCluster) error {
+ var editClusterResponse EditClusterResponse
+ path := "/api/2.1preview/clusters/edit"
+ queryParams := make(map[string]any)
+ headers := make(map[string]string)
+ headers["Accept"] = "application/json"
+ headers["Content-Type"] = "application/json"
+ err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editClusterResponse)
+ return err
+}
+
+// List cluster activity events.
+//
+// Retrieves a list of events about the activity of a cluster. This API is
+// paginated. If there are more events to read, the response includes all the
+// parameters necessary to request the next page of events.
+func (a *clustersPreviewImpl) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] {
+
+ getNextPage := func(ctx context.Context, req GetEvents) (*GetEventsResponse, error) {
+ ctx = useragent.InContext(ctx, "sdk-feature", "pagination")
+ return a.internalEvents(ctx, req)
+ }
+ getItems := func(resp *GetEventsResponse) []ClusterEvent {
+ return resp.Events
+ }
+ getNextReq := func(resp *GetEventsResponse) *GetEvents {
+ if len(getItems(resp)) == 0 {
+ return nil
+ }
+ request.Offset = resp.Offset + int64(len(resp.Events))
+ return &request
+ }
+ iterator := listing.NewIterator(
+ &request,
+ getNextPage,
+ getItems,
+ getNextReq)
+ return iterator
+}
+
+// List cluster activity events.
+//
+// Retrieves a list of events about the activity of a cluster. This API is
+// paginated. If there are more events to read, the response includes all the
+// parameters necessary to request the next page of events.
+func (a *clustersPreviewImpl) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) {
+ iterator := a.Events(ctx, request)
+ return listing.ToSliceN[ClusterEvent, int64](ctx, iterator, request.Limit)
+
+}
+func (a *clustersPreviewImpl) internalEvents(ctx context.Context, request GetEvents) (*GetEventsResponse, error) {
+ var getEventsResponse GetEventsResponse
+ path := "/api/2.1preview/clusters/events"
+ queryParams := make(map[string]any)
+ headers := make(map[string]string)
+ headers["Accept"] = "application/json"
+ headers["Content-Type"] = "application/json"
+ err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &getEventsResponse)
+ return &getEventsResponse, err
+}
+
+func (a *clustersPreviewImpl) Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error) {
+ var clusterDetails ClusterDetails
+ path := "/api/2.1preview/clusters/get"
+ queryParams := make(map[string]any)
+ headers := make(map[string]string)
+ headers["Accept"] = "application/json"
+ err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterDetails)
+ return &clusterDetails, err
+}
+
+func (a *clustersPreviewImpl) GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error) {
+ var getClusterPermissionLevelsResponse GetClusterPermissionLevelsResponse
+ path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v/permissionLevels", request.ClusterId)
+ queryParams := make(map[string]any)
+ headers := make(map[string]string)
+ headers["Accept"] = "application/json"
+ err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterPermissionLevelsResponse)
+ return &getClusterPermissionLevelsResponse, err
+}
+
+func (a *clustersPreviewImpl) GetPermissions(ctx
context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error) { + var clusterPermissions ClusterPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterPermissions) + return &clusterPermissions, err +} + +// List clusters. +// +// Return information about all pinned and active clusters, and all clusters +// terminated within the last 30 days. Clusters terminated prior to this period +// are not included. +func (a *clustersPreviewImpl) List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] { + + getNextPage := func(ctx context.Context, req ListClustersRequest) (*ListClustersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListClustersResponse) []ClusterDetails { + return resp.Clusters + } + getNextReq := func(resp *ListClustersResponse) *ListClustersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List clusters. +// +// Return information about all pinned and active clusters, and all clusters +// terminated within the last 30 days. Clusters terminated prior to this period +// are not included. +func (a *clustersPreviewImpl) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ClusterDetails, int](ctx, iterator, request.PageSize) + +} +func (a *clustersPreviewImpl) internalList(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { + var listClustersResponse ListClustersResponse + path := "/api/2.1preview/clusters/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listClustersResponse) + return &listClustersResponse, err +} + +func (a *clustersPreviewImpl) ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error) { + var listNodeTypesResponse ListNodeTypesResponse + path := "/api/2.1preview/clusters/list-node-types" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listNodeTypesResponse) + return &listNodeTypesResponse, err +} + +func (a *clustersPreviewImpl) ListZones(ctx context.Context) (*ListAvailableZonesResponse, error) { + var listAvailableZonesResponse ListAvailableZonesResponse + path := "/api/2.1preview/clusters/list-zones" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listAvailableZonesResponse) + return &listAvailableZonesResponse, err +} + +func (a *clustersPreviewImpl) PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error { + var permanentDeleteClusterResponse PermanentDeleteClusterResponse + path := "/api/2.1preview/clusters/permanent-delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := 
a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &permanentDeleteClusterResponse) + return err +} + +func (a *clustersPreviewImpl) Pin(ctx context.Context, request PinCluster) error { + var pinClusterResponse PinClusterResponse + path := "/api/2.1preview/clusters/pin" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &pinClusterResponse) + return err +} + +func (a *clustersPreviewImpl) Resize(ctx context.Context, request ResizeCluster) error { + var resizeClusterResponse ResizeClusterResponse + path := "/api/2.1preview/clusters/resize" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &resizeClusterResponse) + return err +} + +func (a *clustersPreviewImpl) Restart(ctx context.Context, request RestartCluster) error { + var restartClusterResponse RestartClusterResponse + path := "/api/2.1preview/clusters/restart" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restartClusterResponse) + return err +} + +func (a *clustersPreviewImpl) SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { + var clusterPermissions ClusterPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &clusterPermissions) + return &clusterPermissions, err +} + +func (a *clustersPreviewImpl) SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error) { + var getSparkVersionsResponse GetSparkVersionsResponse + path := "/api/2.1preview/clusters/spark-versions" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getSparkVersionsResponse) + return &getSparkVersionsResponse, err +} + +func (a *clustersPreviewImpl) Start(ctx context.Context, request StartCluster) error { + var startClusterResponse StartClusterResponse + path := "/api/2.1preview/clusters/start" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &startClusterResponse) + return err +} + +func (a *clustersPreviewImpl) Unpin(ctx context.Context, request UnpinCluster) error { + var unpinClusterResponse UnpinClusterResponse + path := "/api/2.1preview/clusters/unpin" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &unpinClusterResponse) + return err +} + +func (a *clustersPreviewImpl) Update(ctx context.Context, request UpdateCluster) error { + var updateClusterResponse 
UpdateClusterResponse + path := "/api/2.1preview/clusters/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateClusterResponse) + return err +} + +func (a *clustersPreviewImpl) UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { + var clusterPermissions ClusterPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &clusterPermissions) + return &clusterPermissions, err +} + +// unexported type that holds implementations of just CommandExecutionPreview API methods +type commandExecutionPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *commandExecutionPreviewImpl) Cancel(ctx context.Context, request CancelCommand) error { + var cancelResponse CancelResponse + path := "/api/1.2preview/commands/cancel" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelResponse) + return err +} + +func (a *commandExecutionPreviewImpl) CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error) { + var commandStatusResponse CommandStatusResponse + path := "/api/1.2preview/commands/status" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &commandStatusResponse) + return &commandStatusResponse, err +} + +func (a *commandExecutionPreviewImpl) ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error) { + var contextStatusResponse ContextStatusResponse + path := "/api/1.2preview/contexts/status" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &contextStatusResponse) + return &contextStatusResponse, err +} + +func (a *commandExecutionPreviewImpl) Create(ctx context.Context, request CreateContext) (*Created, error) { + var created Created + path := "/api/1.2preview/contexts/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &created) + return &created, err +} + +func (a *commandExecutionPreviewImpl) Destroy(ctx context.Context, request DestroyContext) error { + var destroyResponse DestroyResponse + path := "/api/1.2preview/contexts/destroy" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &destroyResponse) + return err +} + +func (a *commandExecutionPreviewImpl) Execute(ctx context.Context, request Command) (*Created, 
error) { + var created Created + path := "/api/1.2preview/commands/execute" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &created) + return &created, err +} + +// unexported type that holds implementations of just GlobalInitScriptsPreview API methods +type globalInitScriptsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *globalInitScriptsPreviewImpl) Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error) { + var createResponse CreateResponse + path := "/api/2.0preview/global-init-scripts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) + return &createResponse, err +} + +func (a *globalInitScriptsPreviewImpl) Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *globalInitScriptsPreviewImpl) Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error) { + var globalInitScriptDetailsWithContent GlobalInitScriptDetailsWithContent + path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &globalInitScriptDetailsWithContent) + return &globalInitScriptDetailsWithContent, err +} + +// Get init scripts. +// +// Get a list of all global init scripts for this workspace. This returns all +// properties for each script but **not** the script contents. To retrieve the +// contents of a script, use the [get a global init +// script](:method:globalinitscripts/get) operation. +func (a *globalInitScriptsPreviewImpl) List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListGlobalInitScriptsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListGlobalInitScriptsResponse) []GlobalInitScriptDetails { + return resp.Scripts + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get init scripts. +// +// Get a list of all global init scripts for this workspace. This returns all +// properties for each script but **not** the script contents. To retrieve the +// contents of a script, use the [get a global init +// script](:method:globalinitscripts/get) operation. 
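+//
+// An illustrative sketch: ListAll drains the iterator into memory via
+// listing.ToSlice, so the slice is safe to range over but unbounded in size.
+// Error handling is abbreviated.
+//
+//	scripts, err := a.ListAll(ctx)
+//	// handle err
+//	for _, s := range scripts {
+//		fmt.Printf("%s enabled=%t\n", s.Name, s.Enabled)
+//	}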
+func (a *globalInitScriptsPreviewImpl) ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) { + iterator := a.List(ctx) + return listing.ToSlice[GlobalInitScriptDetails](ctx, iterator) +} +func (a *globalInitScriptsPreviewImpl) internalList(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { + var listGlobalInitScriptsResponse ListGlobalInitScriptsResponse + path := "/api/2.0preview/global-init-scripts" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listGlobalInitScriptsResponse) + return &listGlobalInitScriptsResponse, err +} + +func (a *globalInitScriptsPreviewImpl) Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just InstancePoolsPreview API methods +type instancePoolsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *instancePoolsPreviewImpl) Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error) { + var createInstancePoolResponse CreateInstancePoolResponse + path := "/api/2.0preview/instance-pools/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createInstancePoolResponse) + return &createInstancePoolResponse, err +} + +func (a *instancePoolsPreviewImpl) Delete(ctx context.Context, request DeleteInstancePool) error { + var deleteInstancePoolResponse DeleteInstancePoolResponse + path := "/api/2.0preview/instance-pools/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteInstancePoolResponse) + return err +} + +func (a *instancePoolsPreviewImpl) Edit(ctx context.Context, request EditInstancePool) error { + var editInstancePoolResponse EditInstancePoolResponse + path := "/api/2.0preview/instance-pools/edit" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editInstancePoolResponse) + return err +} + +func (a *instancePoolsPreviewImpl) Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error) { + var getInstancePool GetInstancePool + path := "/api/2.0preview/instance-pools/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getInstancePool) + return &getInstancePool, err +} + +func (a *instancePoolsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error) { + var getInstancePoolPermissionLevelsResponse 
GetInstancePoolPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v/permissionLevels", request.InstancePoolId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getInstancePoolPermissionLevelsResponse) + return &getInstancePoolPermissionLevelsResponse, err +} + +func (a *instancePoolsPreviewImpl) GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { + var instancePoolPermissions InstancePoolPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &instancePoolPermissions) + return &instancePoolPermissions, err +} + +// List instance pool info. +// +// Gets a list of instance pools with their statistics. +func (a *instancePoolsPreviewImpl) List(ctx context.Context) listing.Iterator[InstancePoolAndStats] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListInstancePools, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListInstancePools) []InstancePoolAndStats { + return resp.InstancePools + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List instance pool info. +// +// Gets a list of instance pools with their statistics. +func (a *instancePoolsPreviewImpl) ListAll(ctx context.Context) ([]InstancePoolAndStats, error) { + iterator := a.List(ctx) + return listing.ToSlice[InstancePoolAndStats](ctx, iterator) +} +func (a *instancePoolsPreviewImpl) internalList(ctx context.Context) (*ListInstancePools, error) { + var listInstancePools ListInstancePools + path := "/api/2.0preview/instance-pools/list" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listInstancePools) + return &listInstancePools, err +} + +func (a *instancePoolsPreviewImpl) SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { + var instancePoolPermissions InstancePoolPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &instancePoolPermissions) + return &instancePoolPermissions, err +} + +func (a *instancePoolsPreviewImpl) UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { + var instancePoolPermissions InstancePoolPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &instancePoolPermissions) + return &instancePoolPermissions, err +} + +// unexported type that holds 
implementations of just InstanceProfilesPreview API methods +type instanceProfilesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *instanceProfilesPreviewImpl) Add(ctx context.Context, request AddInstanceProfile) error { + var addResponse AddResponse + path := "/api/2.0preview/instance-profiles/add" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addResponse) + return err +} + +func (a *instanceProfilesPreviewImpl) Edit(ctx context.Context, request InstanceProfile) error { + var editResponse EditResponse + path := "/api/2.0preview/instance-profiles/edit" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editResponse) + return err +} + +// List available instance profiles. +// +// List the instance profiles that the calling user can use to launch a cluster. +// +// This API is available to all users. +func (a *instanceProfilesPreviewImpl) List(ctx context.Context) listing.Iterator[InstanceProfile] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListInstanceProfilesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListInstanceProfilesResponse) []InstanceProfile { + return resp.InstanceProfiles + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List available instance profiles. +// +// List the instance profiles that the calling user can use to launch a cluster. +// +// This API is available to all users. +func (a *instanceProfilesPreviewImpl) ListAll(ctx context.Context) ([]InstanceProfile, error) { + iterator := a.List(ctx) + return listing.ToSlice[InstanceProfile](ctx, iterator) +} +func (a *instanceProfilesPreviewImpl) internalList(ctx context.Context) (*ListInstanceProfilesResponse, error) { + var listInstanceProfilesResponse ListInstanceProfilesResponse + path := "/api/2.0preview/instance-profiles/list" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listInstanceProfilesResponse) + return &listInstanceProfilesResponse, err +} + +func (a *instanceProfilesPreviewImpl) Remove(ctx context.Context, request RemoveInstanceProfile) error { + var removeResponse RemoveResponse + path := "/api/2.0preview/instance-profiles/remove" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &removeResponse) + return err +} + +// unexported type that holds implementations of just LibrariesPreview API methods +type librariesPreviewImpl struct { + client *client.DatabricksClient +} + +// Get all statuses. +// +// Get the status of all libraries on all clusters. A status is returned for all +// libraries installed on each cluster via the API or the libraries UI.
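+//
+// Example (an illustrative sketch; `svc` stands for a value implementing this
+// interface, and the HasNext/Next calls follow the listing.Iterator contract):
+//
+//	it := svc.AllClusterStatuses(ctx)
+//	for it.HasNext(ctx) {
+//		st, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Printf("cluster %s: %d libraries\n", st.ClusterId, len(st.LibraryStatuses))
+//	}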
+func (a *librariesPreviewImpl) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListAllClusterLibraryStatusesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalAllClusterStatuses(ctx) + } + getItems := func(resp *ListAllClusterLibraryStatusesResponse) []ClusterLibraryStatuses { + return resp.Statuses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all statuses. +// +// Get the status of all libraries on all clusters. A status is returned for all +// libraries installed on each cluster via the API or the libraries UI. +func (a *librariesPreviewImpl) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error) { + iterator := a.AllClusterStatuses(ctx) + return listing.ToSlice[ClusterLibraryStatuses](ctx, iterator) +} +func (a *librariesPreviewImpl) internalAllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { + var listAllClusterLibraryStatusesResponse ListAllClusterLibraryStatusesResponse + path := "/api/2.0preview/libraries/all-cluster-statuses" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listAllClusterLibraryStatusesResponse) + return &listAllClusterLibraryStatusesResponse, err +} + +// Get status. +// +// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster, but are now marked for removal, in no +// particular order, are returned last. +func (a *librariesPreviewImpl) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { + + getNextPage := func(ctx context.Context, req ClusterStatus) (*ClusterLibraryStatuses, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalClusterStatus(ctx, req) + } + getItems := func(resp *ClusterLibraryStatuses) []LibraryFullStatus { + return resp.LibraryStatuses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get status. +// +// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster, but are now marked for removal, in no +// particular order, are returned last.
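+//
+// Example (an illustrative sketch; `svc` and the cluster ID are assumptions):
+//
+//	statuses, err := svc.ClusterStatusAll(ctx, ClusterStatus{
+//		ClusterId: "1234-567890-abcde123",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, status := range statuses {
+//		fmt.Println(status.Status)
+//	}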
+func (a *librariesPreviewImpl) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { + iterator := a.ClusterStatus(ctx, request) + return listing.ToSlice[LibraryFullStatus](ctx, iterator) +} +func (a *librariesPreviewImpl) internalClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { + var clusterLibraryStatuses ClusterLibraryStatuses + path := "/api/2.0preview/libraries/cluster-status" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterLibraryStatuses) + return &clusterLibraryStatuses, err +} + +func (a *librariesPreviewImpl) Install(ctx context.Context, request InstallLibraries) error { + var installLibrariesResponse InstallLibrariesResponse + path := "/api/2.0preview/libraries/install" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &installLibrariesResponse) + return err +} + +func (a *librariesPreviewImpl) Uninstall(ctx context.Context, request UninstallLibraries) error { + var uninstallLibrariesResponse UninstallLibrariesResponse + path := "/api/2.0preview/libraries/uninstall" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &uninstallLibrariesResponse) + return err +} + +// unexported type that holds implementations of just PolicyComplianceForClustersPreview API methods +type policyComplianceForClustersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *policyComplianceForClustersPreviewImpl) EnforceCompliance(ctx context.Context, request EnforceClusterComplianceRequest) (*EnforceClusterComplianceResponse, error) { + var enforceClusterComplianceResponse EnforceClusterComplianceResponse + path := "/api/2.0preview/policies/clusters/enforce-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &enforceClusterComplianceResponse) + return &enforceClusterComplianceResponse, err +} + +func (a *policyComplianceForClustersPreviewImpl) GetCompliance(ctx context.Context, request GetClusterComplianceRequest) (*GetClusterComplianceResponse, error) { + var getClusterComplianceResponse GetClusterComplianceResponse + path := "/api/2.0preview/policies/clusters/get-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterComplianceResponse) + return &getClusterComplianceResponse, err +} + +// List cluster policy compliance. +// +// Returns the policy compliance status of all clusters that use a given policy. +// Clusters could be out of compliance if their policy was updated after the +// cluster was last edited. 
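+//
+// Example (an illustrative sketch; `svc` and the policy ID are assumptions —
+// the iterator follows the next page token returned by each page
+// automatically):
+//
+//	it := svc.ListCompliance(ctx, ListClusterCompliancesRequest{
+//		PolicyId: "example-policy-id",
+//	})
+//	for it.HasNext(ctx) {
+//		c, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(c.ClusterId, c.IsCompliant)
+//	}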
+func (a *policyComplianceForClustersPreviewImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] { + + getNextPage := func(ctx context.Context, req ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCompliance(ctx, req) + } + getItems := func(resp *ListClusterCompliancesResponse) []ClusterCompliance { + return resp.Clusters + } + getNextReq := func(resp *ListClusterCompliancesResponse) *ListClusterCompliancesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List cluster policy compliance. +// +// Returns the policy compliance status of all clusters that use a given policy. +// Clusters could be out of compliance if their policy was updated after the +// cluster was last edited. +func (a *policyComplianceForClustersPreviewImpl) ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) { + iterator := a.ListCompliance(ctx, request) + return listing.ToSlice[ClusterCompliance](ctx, iterator) +} +func (a *policyComplianceForClustersPreviewImpl) internalListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { + var listClusterCompliancesResponse ListClusterCompliancesResponse + path := "/api/2.0preview/policies/clusters/list-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listClusterCompliancesResponse) + return &listClusterCompliancesResponse, err +} + +// unexported type that holds implementations of just PolicyFamiliesPreview API methods +type policyFamiliesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *policyFamiliesPreviewImpl) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) { + var policyFamily PolicyFamily + path := fmt.Sprintf("/api/2.0preview/policy-families/%v", request.PolicyFamilyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &policyFamily) + return &policyFamily, err +} + +// List policy families. +// +// Returns the list of policy definition types available to use at their latest +// version. This API is paginated. +func (a *policyFamiliesPreviewImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily] { + + getNextPage := func(ctx context.Context, req ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListPolicyFamiliesResponse) []PolicyFamily { + return resp.PolicyFamilies + } + getNextReq := func(resp *ListPolicyFamiliesResponse) *ListPolicyFamiliesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List policy families. 
+// +// Returns the list of policy definition types available to use at their latest +// version. This API is paginated. +func (a *policyFamiliesPreviewImpl) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PolicyFamily](ctx, iterator) +} +func (a *policyFamiliesPreviewImpl) internalList(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { + var listPolicyFamiliesResponse ListPolicyFamiliesResponse + path := "/api/2.0preview/policy-families" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPolicyFamiliesResponse) + return &listPolicyFamiliesResponse, err +} diff --git a/compute/v2preview/model.go b/compute/v2preview/model.go new file mode 100755 index 000000000..73b6d8bab --- /dev/null +++ b/compute/v2preview/model.go @@ -0,0 +1,5492 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package computepreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type AddInstanceProfile struct { + // The AWS IAM role ARN of the role associated with the instance profile. + // This field is required if your role name and instance profile name do not + // match and you want to use the instance profile with [Databricks SQL + // Serverless]. + // + // Otherwise, this field is optional. + // + // [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html + IamRoleArn string `json:"iam_role_arn,omitempty"` + // The AWS ARN of the instance profile to register with Databricks. This + // field is required. + InstanceProfileArn string `json:"instance_profile_arn"` + // Boolean flag indicating whether the instance profile should only be used + // in credential passthrough scenarios. If true, it means the instance + // profile contains a meta IAM role which could assume a wide range of + // roles. Therefore it should always be used with authorization. This field + // is optional; the default value is `false`. + IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"` + // By default, Databricks validates that it has sufficient permissions to + // launch instances with the instance profile. This validation uses AWS + // dry-run mode for the RunInstances API. If validation fails with an error + // message that does not indicate an IAM-related permission issue (e.g. + // “Your requested instance type is not supported in your requested + // availability zone”), you can pass this flag to skip the validation and + // forcibly add the instance profile. + SkipValidation bool `json:"skip_validation,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AddInstanceProfile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AddInstanceProfile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AddResponse struct { +} + +type Adlsgen2Info struct { + // abfss destination, e.g. + // `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`. + Destination string `json:"destination"` +} + +type AutoScale struct { + // The maximum number of workers to which the cluster can scale up when + // overloaded. Note that `max_workers` must be strictly greater than + // `min_workers`.
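+ // For example (an illustrative sketch, not generated documentation), an
+ // autoscaling range of 2 to 8 workers, satisfying that requirement, would
+ // be expressed as:
+ //
+ //	autoscale := &AutoScale{MinWorkers: 2, MaxWorkers: 8}
+ //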
+ MaxWorkers int `json:"max_workers,omitempty"` + // The minimum number of workers to which the cluster can scale down when + // underutilized. It is also the initial number of workers the cluster will + // have after creation. + MinWorkers int `json:"min_workers,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AutoScale) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AutoScale) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AwsAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. + // + // Note: If `first_on_demand` is zero, this availability type will be used + // for the entire cluster. + Availability AwsAvailability `json:"availability,omitempty"` + // The number of volumes launched for each instance. Users can choose up to + // 10 volumes. This feature is only enabled for supported node types. Legacy + // node types cannot specify custom EBS volumes. For node types with no + // instance store, at least one EBS volume needs to be specified; otherwise, + // cluster creation will fail. + // + // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, and so on. Instance + // store volumes will be mounted at `/local_disk0`, `/local_disk1`, and so on. + // + // If EBS volumes are attached, Databricks will configure Spark to use only + // the EBS volumes for scratch storage because heterogeneously sized scratch + // devices can lead to inefficient disk utilization. If no EBS volumes are + // attached, Databricks will configure Spark to use instance store volumes. + // + // Please note that if EBS volumes are specified, then the Spark + // configuration `spark.local.dir` will be overridden. + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + // If using gp3 volumes, what IOPS to use for the disk. If this is not set, + // the maximum performance of a gp2 volume with the same volume size will be + // used. + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` + // The size of each EBS volume (in GiB) launched for each instance. For + // general purpose SSD, this value must be within the range 100 - 4096. For + // throughput optimized HDD, this value must be within the range 500 - 4096. + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + // If using gp3 volumes, what throughput to use for the disk. If this is not + // set, the maximum performance of a gp2 volume with the same volume size + // will be used. + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` + // The type of EBS volumes that will be launched with this cluster. + EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. If this value is greater than 0, the cluster driver + // node in particular will be placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand int `json:"first_on_demand,omitempty"` + // Nodes for this cluster will only be placed on AWS instances with this + // instance profile.
If omitted, nodes will be placed on instances without + // an IAM instance profile. The instance profile must have previously been + // added to the Databricks environment by an account administrator. + // + // This feature may only be available to certain customer plans. + // + // If this field is omitted, we will pull in the default from the conf if + // it exists. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The bid price for AWS spot instances, as a percentage of the + // corresponding instance type's on-demand price. For example, if this field + // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then + // the bid price is half of the price of on-demand `r3.xlarge` instances. + // Similarly, if this field is set to 200, the bid price is twice the price + // of on-demand `r3.xlarge` instances. If not specified, the default value + // is 100. When spot instances are requested for this cluster, only spot + // instances whose bid price percentage matches this field will be + // considered. Note that, for safety, we enforce this field to be no more + // than 10000. + // + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidPricePercent and + // CommonConf.maxSpotBidPricePercent. + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + // Identifier for the availability zone/datacenter in which the cluster + // resides. This string will be of a form like "us-west-2a". The provided + // availability zone must be in the same region as the Databricks + // deployment. For example, "us-west-2a" is not a valid zone id if the + // Databricks deployment resides in the "us-east-1" region. This is an + // optional field at cluster creation, and if not specified, a default zone + // will be used. If the zone specified is "auto", Databricks will try to + // place the cluster in a zone with high availability, and will retry + // placement in a different AZ if there is not enough capacity. The list of + // available zones as well as the default value can be found by using the + // `List Zones` method. + ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AwsAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AwsAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. +// +// Note: If `first_on_demand` is zero, this availability type will be used for +// the entire cluster. +type AwsAvailability string + +const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND` + +const AwsAvailabilitySpot AwsAvailability = `SPOT` + +const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK` + +// String representation for [fmt.Print] +func (f *AwsAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AwsAvailability) Set(v string) error { + switch v { + case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`: + *f = AwsAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v) + } +} + +// Type always returns AwsAvailability to satisfy [pflag.Value] interface +func (f *AwsAvailability) Type() string { + return "AwsAvailability" +} + +type AzureAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones.
Note: If `first_on_demand` is zero (which only + // happens on pool clusters), this availability type will be used for the + // entire cluster. + Availability AzureAvailability `json:"availability,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. This value should be greater than 0, to make sure + // the cluster driver node is placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand int `json:"first_on_demand,omitempty"` + // Defines values necessary to configure and run Azure Log Analytics agent + LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"` + // The max bid price to be used for Azure spot instances. The max price for + // the bid cannot be higher than the on-demand price of the instance. If not + // specified, the default value is -1, which specifies that the instance + // cannot be evicted on the basis of price, and only on the basis of + // availability. Further, the value should be > 0 or -1. + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. Note: If `first_on_demand` is zero (which only happens on pool +// clusters), this availability type will be used for the entire cluster. +type AzureAvailability string + +const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` + +const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE` + +const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE` + +// String representation for [fmt.Print] +func (f *AzureAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AzureAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`: + *f = AzureAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v) + } +} + +// Type always returns AzureAvailability to satisfy [pflag.Value] interface +func (f *AzureAvailability) Type() string { + return "AzureAvailability" +} + +type CancelCommand struct { + ClusterId string `json:"clusterId,omitempty"` + + CommandId string `json:"commandId,omitempty"` + + ContextId string `json:"contextId,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CancelCommand) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CancelCommand) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CancelResponse struct { +} + +type ChangeClusterOwner struct { + // + ClusterId string `json:"cluster_id"` + // New owner of the cluster_id after this RPC.
+ OwnerUsername string `json:"owner_username"` +} + +type ChangeClusterOwnerResponse struct { +} + +type ClientsTypes struct { + // With jobs set, the cluster can be used for jobs + Jobs bool `json:"jobs,omitempty"` + // With notebooks set, this cluster can be used for notebooks + Notebooks bool `json:"notebooks,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClientsTypes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClientsTypes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CloneCluster struct { + // The cluster that is being cloned. + SourceClusterId string `json:"source_cluster_id"` +} + +type CloudProviderNodeInfo struct { + Status []CloudProviderNodeStatus `json:"status,omitempty"` +} + +type CloudProviderNodeStatus string + +const CloudProviderNodeStatusNotAvailableInRegion CloudProviderNodeStatus = `NotAvailableInRegion` + +const CloudProviderNodeStatusNotEnabledOnSubscription CloudProviderNodeStatus = `NotEnabledOnSubscription` + +// String representation for [fmt.Print] +func (f *CloudProviderNodeStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CloudProviderNodeStatus) Set(v string) error { + switch v { + case `NotAvailableInRegion`, `NotEnabledOnSubscription`: + *f = CloudProviderNodeStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NotAvailableInRegion", "NotEnabledOnSubscription"`, v) + } +} + +// Type always returns CloudProviderNodeStatus to satisfy [pflag.Value] interface +func (f *CloudProviderNodeStatus) Type() string { + return "CloudProviderNodeStatus" +} + +type ClusterAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterAccessControlResponse struct { + // All permissions. + AllPermissions []ClusterPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterAttributes struct { + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. 
+ AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // The following modes can only be used with `kind`. * + // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate + // access mode depending on your compute configuration. * + // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * + // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + // + // The following modes can be used regardless of `kind`. * `NONE`: No + // security isolation for multiple users sharing the cluster. Data + // governance features are not available in this mode. * `SINGLE_USER`: A + // secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that has neither UC nor passthrough enabled.
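+ // For example (an illustrative sketch; the constant name follows this
+ // file's naming convention for DataSecurityMode values and is an
+ // assumption), a dedicated single-user cluster would set:
+ //
+ //	DataSecurityMode: DataSecurityModeSingleUser,
+ //	SingleUserName:   "someone@example.com",
+ //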
+ DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` + + DockerImage *DockerImage `json:"docker_image,omitempty"` + // The optional ID of the instance pool to which the driver of the cluster + // belongs. The cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `$destination/$clusterId/init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers`. + IsSingleNode bool `json:"is_single_node,omitempty"` + // The kind of compute described by this compute specification. + // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used.
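+ // For example (an illustrative sketch; the version string and constant
+ // name are assumptions), prefer:
+ //
+ //	SparkVersion:  "15.4.x-scala2.12",
+ //	RuntimeEngine: RuntimeEnginePhoton,
+ //
+ // over a legacy `spark_version` containing `-photon-`.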
+ RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that a key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default Databricks-managed + // environment variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to log in with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterCompliance struct { + // Canonical unique identifier for a cluster. + ClusterId string `json:"cluster_id"` + // Whether this cluster is in compliance with the latest version of its + // policy. + IsCompliant bool `json:"is_compliant,omitempty"` + // An object containing key-value mappings representing the first 200 policy + // validation errors. The keys indicate the path where the policy validation + // error is occurring. The values indicate an error message describing the + // policy validation error. + Violations map[string]string `json:"violations,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterCompliance) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterCompliance) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterDetails struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later.
+ Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // Number of CPU cores available for this cluster. Note that this can be + // fractional, e.g. 7.5 cores, since certain node types are configured to + // share cores between Spark nodes on the same instance. + ClusterCores float64 `json:"cluster_cores,omitempty"` + // Canonical identifier for the cluster. This id is retained during cluster + // restarts and resizes, while each new cluster has a globally unique id. + ClusterId string `json:"cluster_id,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster log delivery status. + ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"` + // Total amount of cluster memory, in megabytes + ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Determines whether the cluster was created by a user through the UI, + // created by the Databricks Jobs Scheduler, or through an API request. This + // is the same as cluster_creator, but read only. + ClusterSource ClusterSource `json:"cluster_source,omitempty"` + // Creator user name. The field won't be included in the response if the + // user has already been deleted. + CreatorUserName string `json:"creator_user_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // The following modes can only be used with `kind`. * + // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate + // access mode depending on your compute configuration. * + // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. 
* + // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + // + // The following modes can be used regardless of `kind`. * `NONE`: No + // security isolation for multiple users sharing the cluster. Data + // governance features are not available in this mode. * `SINGLE_USER`: A + // secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that has neither UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` + // Tags that are added by Databricks regardless of any `custom_tags`, + // including: + // + // - Vendor: Databricks + // + // - Creator: + // + // - ClusterName: + // + // - ClusterId: + // + // - Name: + DefaultTags map[string]string `json:"default_tags,omitempty"` + + DockerImage *DockerImage `json:"docker_image,omitempty"` + // Node on which the Spark driver resides. The driver node contains the + // Spark master and the Databricks application that manages the per-notebook + // Spark REPLs. + Driver *SparkNode `json:"driver,omitempty"` + // The optional ID of the instance pool to which the driver of the cluster + // belongs. The cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + // Nodes on which the Spark executors reside. + Executors []SparkNode `json:"executors,omitempty"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided.
If `cluster_log_conf` is specified, init script logs are sent + // to `$destination/$clusterId/init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers`. + IsSingleNode bool `json:"is_single_node,omitempty"` + // Port on which Spark JDBC server is listening, in the driver node. No + // service will be listening on this port in executor nodes. + JdbcPort int `json:"jdbc_port,omitempty"` + // The kind of compute described by this compute specification. + // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // The timestamp that the cluster was started/restarted + LastRestartedTime int64 `json:"last_restarted_time,omitempty"` + // Time when the cluster driver last lost its state (due to a restart or + // driver failure). + LastStateLossTime int64 `json:"last_state_loss_time,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // A canonical SparkContext identifier. This value *does* change when the + // Spark driver restarts.
The pair `(cluster_id, spark_context_id)` is a + // globally unique identifier over all Spark contexts. + SparkContextId int64 `json:"spark_context_id,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version,omitempty"` + // `spec` contains a snapshot of the field values that were used to create + // or edit this cluster. The contents of `spec` can be used in the body of a + // create cluster request. This field might not be populated for older + // clusters. Note: not included in the response of the ListClusters API. + Spec *ClusterSpec `json:"spec,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // Time (in epoch milliseconds) when the cluster creation request was + // received (when the cluster entered a `PENDING` state). + StartTime int64 `json:"start_time,omitempty"` + // Current state of the cluster. + State State `json:"state,omitempty"` + // A message associated with the most recent state transition (e.g., the + // reason why the cluster entered a `TERMINATED` state). + StateMessage string `json:"state_message,omitempty"` + // Time (in epoch milliseconds) when the cluster was terminated, if + // applicable. + TerminatedTime int64 `json:"terminated_time,omitempty"` + // Information about why the cluster was terminated. This field only appears + // when the cluster is in a `TERMINATING` or `TERMINATED` state. + TerminationReason *TerminationReason `json:"termination_reason,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterEvent struct { + // + ClusterId string `json:"cluster_id"` + // + DataPlaneEventDetails *DataPlaneEventDetails `json:"data_plane_event_details,omitempty"` + // + Details *EventDetails `json:"details,omitempty"` + // The timestamp when the event occurred, stored as the number of + // milliseconds since the Unix epoch. 
If not provided, this will be assigned
+ // by the Timeline service.
+ Timestamp int64 `json:"timestamp,omitempty"`
+
+ Type EventType `json:"type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ClusterEvent) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ClusterEvent) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ClusterLibraryStatuses struct {
+ // Unique identifier for the cluster.
+ ClusterId string `json:"cluster_id,omitempty"`
+ // Status of all libraries on the cluster.
+ LibraryStatuses []LibraryFullStatus `json:"library_statuses,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ClusterLibraryStatuses) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ClusterLibraryStatuses) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ClusterLogConf struct {
+ // destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
+ // "dbfs:/home/cluster_log" } }`
+ Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
+ // destination and either the region or endpoint need to be provided. e.g.
+ // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
+ // "us-west-2" } }` The cluster IAM role is used to access S3; please make
+ // sure the cluster IAM role in `instance_profile_arn` has permission to
+ // write data to the S3 destination.
+ S3 *S3StorageInfo `json:"s3,omitempty"`
+ // destination needs to be provided. e.g. `{ "volumes" : { "destination" :
+ // "/Volumes/catalog/schema/volume/cluster_log" } }`
+ Volumes *VolumesStorageInfo `json:"volumes,omitempty"`
+}
+
+type ClusterPermission struct {
+ Inherited bool `json:"inherited,omitempty"`
+
+ InheritedFromObject []string `json:"inherited_from_object,omitempty"`
+ // Permission level
+ PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ClusterPermission) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ClusterPermission) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Permission level
+type ClusterPermissionLevel string
+
+const ClusterPermissionLevelCanAttachTo ClusterPermissionLevel = `CAN_ATTACH_TO`
+
+const ClusterPermissionLevelCanManage ClusterPermissionLevel = `CAN_MANAGE`
+
+const ClusterPermissionLevelCanRestart ClusterPermissionLevel = `CAN_RESTART`
+
+// String representation for [fmt.Print]
+func (f *ClusterPermissionLevel) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ClusterPermissionLevel) Set(v string) error {
+ switch v {
+ case `CAN_ATTACH_TO`, `CAN_MANAGE`, `CAN_RESTART`:
+ *f = ClusterPermissionLevel(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_MANAGE", "CAN_RESTART"`, v)
+ }
+}
+
+// Type always returns ClusterPermissionLevel to satisfy [pflag.Value] interface
+func (f *ClusterPermissionLevel) Type() string {
+ return "ClusterPermissionLevel"
+}
+
+type ClusterPermissions struct {
+ AccessControlList []ClusterAccessControlResponse `json:"access_control_list,omitempty"`
+
+ ObjectId string `json:"object_id,omitempty"`
+
+ ObjectType string `json:"object_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ClusterPermissions) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ClusterPermissions) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type
ClusterPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterPermissionsRequest struct { + AccessControlList []ClusterAccessControlRequest `json:"access_control_list,omitempty"` + // The cluster for which to get or manage permissions. + ClusterId string `json:"-" url:"-"` +} + +type ClusterPolicyAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPolicyAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPolicyAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterPolicyAccessControlResponse struct { + // All permissions. + AllPermissions []ClusterPolicyPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPolicyAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPolicyAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterPolicyPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPolicyPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPolicyPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type ClusterPolicyPermissionLevel string + +const ClusterPolicyPermissionLevelCanUse ClusterPolicyPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *ClusterPolicyPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterPolicyPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = ClusterPolicyPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns ClusterPolicyPermissionLevel to satisfy [pflag.Value] interface +func (f *ClusterPolicyPermissionLevel) Type() string { + return "ClusterPolicyPermissionLevel" +} + +type ClusterPolicyPermissions struct { + AccessControlList []ClusterPolicyAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId 
string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPolicyPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPolicyPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterPolicyPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterPolicyPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterPolicyPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterPolicyPermissionsRequest struct { + AccessControlList []ClusterPolicyAccessControlRequest `json:"access_control_list,omitempty"` + // The cluster policy for which to get or manage permissions. + ClusterPolicyId string `json:"-" url:"-"` +} + +// Represents a change to the cluster settings required for the cluster to +// become compliant with its policy. +type ClusterSettingsChange struct { + // The field where this change would be made. + Field string `json:"field,omitempty"` + // The new value of this field after enforcing policy compliance (either a + // number, a boolean, or a string) converted to a string. This is intended + // to be read by a human. The typed new value of this field can be retrieved + // by reading the settings field in the API response. + NewValue string `json:"new_value,omitempty"` + // The previous value of this field before enforcing policy compliance + // (either a number, a boolean, or a string) converted to a string. This is + // intended to be read by a human. The type of the field can be retrieved by + // reading the settings field in the API response. + PreviousValue string `json:"previous_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterSettingsChange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterSettingsChange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterSize struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterSize) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterSize) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Determines whether the cluster was created by a user through the UI, created +// by the Databricks Jobs Scheduler, or through an API request. 
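+
+// Editor's note: a sketch of how the ForceSendFields/marshal pattern above is
+// meant to be used. Because every field carries `omitempty`, a meaningful zero
+// value such as `num_workers: 0` would normally be dropped from the payload;
+// naming the Go field in ForceSendFields tells the custom marshaller to emit
+// it anyway (the output shown is the expected shape, not a captured run):
+//
+//	size := ClusterSize{
+//		NumWorkers:      0,
+//		ForceSendFields: []string{"NumWorkers"},
+//	}
+//	b, _ := size.MarshalJSON() // roughly {"num_workers":0}
+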
This is the same +// as cluster_creator, but read only. +type ClusterSource string + +const ClusterSourceApi ClusterSource = `API` + +const ClusterSourceJob ClusterSource = `JOB` + +const ClusterSourceModels ClusterSource = `MODELS` + +const ClusterSourcePipeline ClusterSource = `PIPELINE` + +const ClusterSourcePipelineMaintenance ClusterSource = `PIPELINE_MAINTENANCE` + +const ClusterSourceSql ClusterSource = `SQL` + +const ClusterSourceUi ClusterSource = `UI` + +// String representation for [fmt.Print] +func (f *ClusterSource) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterSource) Set(v string) error { + switch v { + case `API`, `JOB`, `MODELS`, `PIPELINE`, `PIPELINE_MAINTENANCE`, `SQL`, `UI`: + *f = ClusterSource(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API", "JOB", "MODELS", "PIPELINE", "PIPELINE_MAINTENANCE", "SQL", "UI"`, v) + } +} + +// Type always returns ClusterSource to satisfy [pflag.Value] interface +func (f *ClusterSource) Type() string { + return "ClusterSource" +} + +type ClusterSpec struct { + // When set to true, fixed and default values from the policy will be used + // for fields that are omitted. When set to false, only fixed values from + // the policy will be applied. + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. 
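+
+ // Editor's note: the String/Set/Type trio generated for enums such as
+ // ClusterSource above satisfies the pflag.Value interface, so an enum can
+ // back a validated CLI flag; a sketch (the flag name is made up):
+ //
+ //	var source ClusterSource
+ //	pflag.Var(&source, "cluster-source", "one of API, JOB, UI, ...")
+ //	// pflag.Parse() then rejects any value Set does not accept.
+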
Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ //
+ // - Clusters can only reuse cloud resources if the resources' tags are a
+ // subset of the cluster tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Data security mode decides what data governance model to use when
+ // accessing data from a cluster.
+ //
+ // The following modes can only be used with `kind`. *
+ // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate
+ // access mode depending on your compute configuration. *
+ // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. *
+ // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
+ //
+ // The following modes can be used regardless of `kind`. * `NONE`: No
+ // security isolation for multiple users sharing the cluster. Data
+ // governance features are not available in this mode. * `SINGLE_USER`: A
+ // secure cluster that can only be exclusively used by a single user
+ // specified in `single_user_name`. Most programming languages, cluster
+ // features and data governance features are available in this mode. *
+ // `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
+ // Cluster users are fully isolated so that they cannot see each other's
+ // data and credentials. Most data governance features are supported in this
+ // mode. But programming languages and cluster features might be limited.
+ //
+ // The following modes are deprecated starting with Databricks Runtime 15.0
+ // and will be removed for future Databricks Runtime versions:
+ //
+ // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
+ // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
+ // from legacy Passthrough on high concurrency clusters. *
+ // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+ // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
+ // mode provides a way that has neither UC nor passthrough enabled.
+ DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
+
+ DockerImage *DockerImage `json:"docker_image,omitempty"`
+ // The optional ID of the instance pool to which the driver of the cluster
+ // belongs. A pool-backed cluster uses the instance pool with id
+ // (instance_pool_id) if the driver pool is not assigned.
+ DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
+ // The node type of the Spark driver. Note that this field is optional; if
+ // unset, the driver node type will be set as the same value as
+ // `node_type_id` defined above.
+ DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
+ // Autoscaling Local Storage: when enabled, this cluster will dynamically
+ // acquire additional disk space when its Spark workers are running low on
+ // disk space. This feature requires specific AWS permissions to function
+ // correctly - refer to the User Guide for more details.
+ EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+ // Whether to enable LUKS on cluster VMs' local disks
+ EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
+ // Attributes related to clusters running on Google Cloud Platform. If not
+ // specified at cluster creation, a set of default values will be used.
+ GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
+ // The configuration for storing init scripts. Any number of destinations
+ // can be specified. The scripts are executed sequentially in the order
+ // provided.
If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers` + IsSingleNode bool `json:"is_single_node,omitempty"` + // The kind of compute described by this compute specification. + // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. 
+ // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get status +type ClusterStatus struct { + // Unique identifier of the cluster whose status should be retrieved. + ClusterId string `json:"-" url:"cluster_id"` +} + +type Command struct { + // Running cluster id + ClusterId string `json:"clusterId,omitempty"` + // Executable code + Command string `json:"command,omitempty"` + // Running context id + ContextId string `json:"contextId,omitempty"` + + Language Language `json:"language,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Command) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Command) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CommandStatus string + +const CommandStatusCancelled CommandStatus = `Cancelled` + +const CommandStatusCancelling CommandStatus = `Cancelling` + +const CommandStatusError CommandStatus = `Error` + +const CommandStatusFinished CommandStatus = `Finished` + +const CommandStatusQueued CommandStatus = `Queued` + +const CommandStatusRunning CommandStatus = `Running` + +// String representation for [fmt.Print] +func (f *CommandStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CommandStatus) Set(v string) error { + switch v { + case `Cancelled`, `Cancelling`, `Error`, `Finished`, `Queued`, `Running`: + *f = CommandStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Cancelled", "Cancelling", "Error", "Finished", "Queued", "Running"`, v) + } +} + +// Type always returns CommandStatus to satisfy [pflag.Value] interface +func (f *CommandStatus) Type() string { + return "CommandStatus" +} + +// Get command info +type CommandStatusRequest struct { + ClusterId string `json:"-" url:"clusterId"` + + CommandId string `json:"-" url:"commandId"` + + ContextId string `json:"-" url:"contextId"` +} + +type CommandStatusResponse struct { + Id string `json:"id,omitempty"` + + Results *Results `json:"results,omitempty"` + + Status CommandStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CommandStatusResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} 
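+
+// Editor's note: a minimal polling sketch over the status types above. The
+// `commands` service client and its CommandStatus method are not shown in
+// this hunk (they live in api.go/impl.go), so treat that call as an
+// assumption; the request, response, and enum values are the ones defined
+// here.
+//
+//	for {
+//		resp, err := commands.CommandStatus(ctx, CommandStatusRequest{
+//			ClusterId: clusterId,
+//			ContextId: contextId,
+//			CommandId: commandId,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		switch resp.Status {
+//		case CommandStatusFinished, CommandStatusError, CommandStatusCancelled:
+//			return nil // terminal states
+//		}
+//		time.Sleep(2 * time.Second)
+//	}
+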
+ +func (s CommandStatusResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ContextStatus string + +const ContextStatusError ContextStatus = `Error` + +const ContextStatusPending ContextStatus = `Pending` + +const ContextStatusRunning ContextStatus = `Running` + +// String representation for [fmt.Print] +func (f *ContextStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ContextStatus) Set(v string) error { + switch v { + case `Error`, `Pending`, `Running`: + *f = ContextStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Error", "Pending", "Running"`, v) + } +} + +// Type always returns ContextStatus to satisfy [pflag.Value] interface +func (f *ContextStatus) Type() string { + return "ContextStatus" +} + +// Get status +type ContextStatusRequest struct { + ClusterId string `json:"-" url:"clusterId"` + + ContextId string `json:"-" url:"contextId"` +} + +type ContextStatusResponse struct { + Id string `json:"id,omitempty"` + + Status ContextStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ContextStatusResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ContextStatusResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCluster struct { + // When set to true, fixed and default values from the policy will be used + // for fields that are omitted. When set to false, only fixed values from + // the policy will be applied. + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // When specified, this clones libraries from a source cluster during the + // creation of a new cluster. + CloneFrom *CloneCluster `json:"clone_from,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. 
If
+ // not specified at creation, the cluster name will be an empty string.
+ ClusterName string `json:"cluster_name,omitempty"`
+ // Additional tags for cluster resources. Databricks will tag all cluster
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ //
+ // - Clusters can only reuse cloud resources if the resources' tags are a
+ // subset of the cluster tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Data security mode decides what data governance model to use when
+ // accessing data from a cluster.
+ //
+ // The following modes can only be used with `kind`. *
+ // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate
+ // access mode depending on your compute configuration. *
+ // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. *
+ // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
+ //
+ // The following modes can be used regardless of `kind`. * `NONE`: No
+ // security isolation for multiple users sharing the cluster. Data
+ // governance features are not available in this mode. * `SINGLE_USER`: A
+ // secure cluster that can only be exclusively used by a single user
+ // specified in `single_user_name`. Most programming languages, cluster
+ // features and data governance features are available in this mode. *
+ // `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
+ // Cluster users are fully isolated so that they cannot see each other's
+ // data and credentials. Most data governance features are supported in this
+ // mode. But programming languages and cluster features might be limited.
+ //
+ // The following modes are deprecated starting with Databricks Runtime 15.0
+ // and will be removed for future Databricks Runtime versions:
+ //
+ // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
+ // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
+ // from legacy Passthrough on high concurrency clusters. *
+ // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+ // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
+ // mode provides a way that has neither UC nor passthrough enabled.
+ DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
+
+ DockerImage *DockerImage `json:"docker_image,omitempty"`
+ // The optional ID of the instance pool to which the driver of the cluster
+ // belongs. A pool-backed cluster uses the instance pool with id
+ // (instance_pool_id) if the driver pool is not assigned.
+ DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
+ // The node type of the Spark driver. Note that this field is optional; if
+ // unset, the driver node type will be set as the same value as
+ // `node_type_id` defined above.
+ DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
+ // Autoscaling Local Storage: when enabled, this cluster will dynamically
+ // acquire additional disk space when its Spark workers are running low on
+ // disk space. This feature requires specific AWS permissions to function
+ // correctly - refer to the User Guide for more details.
+ EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+ // Whether to enable LUKS on cluster VMs' local disks
+ EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
+ // Attributes related to clusters running on Google Cloud Platform.
If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers` + IsSingleNode bool `json:"is_single_node,omitempty"` + // The kind of compute described by this compute specification. + // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. 
+ //
+ // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
+ // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
+ // example below. This ensures that all default databricks managed
+ // environmental variables are included as well.
+ //
+ // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
+ // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
+ // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
+ SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
+ // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
+ // available Spark versions can be retrieved by using the
+ // :method:clusters/sparkVersions API call.
+ SparkVersion string `json:"spark_version"`
+ // SSH public key contents that will be added to each Spark node in this
+ // cluster. The corresponding private keys can be used to login with the
+ // user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
+ SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+ // This field can only be used with `kind`.
+ //
+ // `effective_spark_version` is determined by `spark_version` (DBR release),
+ // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or
+ // not.
+ UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
+
+ WorkloadType *WorkloadType `json:"workload_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateCluster) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateCluster) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateClusterResponse struct {
+ ClusterId string `json:"cluster_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateClusterResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateClusterResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateContext struct {
+ // Running cluster id
+ ClusterId string `json:"clusterId,omitempty"`
+
+ Language Language `json:"language,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateContext) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateContext) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateInstancePool struct {
+ // Attributes related to instance pools running on Amazon Web Services. If
+ // not specified at pool creation, a set of default values will be used.
+ AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"`
+ // Attributes related to instance pools running on Azure. If not specified
+ // at pool creation, a set of default values will be used.
+ AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"`
+ // Additional tags for pool resources. Databricks will tag all pool
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Defines the specification of the disks that will be attached to all spark
+ // containers.
+ DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
+ // Autoscaling Local Storage: when enabled, the instances in this pool will
+ // dynamically acquire additional disk space when their Spark workers are
+ // running low on disk space.
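+
+ // Editor's note: a minimal request sketch for the CreateCluster type
+ // completed above (not for this pool struct). Node type, Spark version,
+ // and user name are placeholders.
+ //
+ //	req := CreateCluster{
+ //		ClusterName:            "sdk-example",
+ //		SparkVersion:           "15.4.x-scala2.12",
+ //		NodeTypeId:             "i3.xlarge",
+ //		NumWorkers:             2,
+ //		AutoterminationMinutes: 60,
+ //		DataSecurityMode:       DataSecurityModeSingleUser,
+ //		SingleUserName:         "someone@example.com",
+ //	}
+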
In AWS, this feature requires specific AWS + // permissions to function correctly - refer to the User Guide for more + // details. + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + // Attributes related to instance pools running on Google Cloud Platform. If + // not specified at pool creation, a set of default values will be used. + GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"` + // Automatically terminates the extra instances in the pool cache after they + // are inactive for this time in minutes if min_idle_instances requirement + // is already met. If not set, the extra pool instances will be + // automatically terminated after a default timeout. If specified, the + // threshold must be between 0 and 10000 minutes. Users can also set this + // value to 0 to instantly remove idle instances from the cache if min cache + // size could still hold. + IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"` + // Pool name requested by the user. Pool name must be unique. Length must be + // between 1 and 100 characters. + InstancePoolName string `json:"instance_pool_name"` + // Maximum number of outstanding instances to keep in the pool, including + // both instances used by clusters and idle instances. Clusters that require + // further instance provisioning will fail during upsize requests. + MaxCapacity int `json:"max_capacity,omitempty"` + // Minimum number of idle instances to keep in the instance pool + MinIdleInstances int `json:"min_idle_instances,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id"` + // Custom Docker Image BYOC + PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"` + // A list containing at most one preloaded Spark image version for the pool. + // Pool-backed clusters started with the preloaded Spark version will start + // faster. A list of available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateInstancePool) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateInstancePool) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateInstancePoolResponse struct { + // The ID of the created instance pool. + InstancePoolId string `json:"instance_pool_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateInstancePoolResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateInstancePoolResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePolicy struct { + // Policy definition document expressed in [Databricks Cluster Policy + // Definition Language]. + // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition string `json:"definition,omitempty"` + // Additional human-readable description of the cluster policy. 
+ Description string `json:"description,omitempty"` + // A list of libraries to be installed on the next cluster restart that uses + // this policy. The maximum number of libraries is 500. + Libraries []Library `json:"libraries,omitempty"` + // Max number of clusters per user that can be active using this policy. If + // not present, there is no max limit. + MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"` + // Cluster Policy name requested by the user. This has to be unique. Length + // must be between 1 and 100 characters. + Name string `json:"name,omitempty"` + // Policy definition JSON document expressed in [Databricks Policy + // Definition Language]. The JSON document must be passed as a string and + // cannot be embedded in the requests. + // + // You can use this to customize the policy definition inherited from the + // policy family. Policy rules specified here are merged into the inherited + // policy definition. + // + // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"` + // ID of the policy family. The cluster policy's policy definition inherits + // the policy family's policy definition. + // + // Cannot be used with `definition`. Use + // `policy_family_definition_overrides` instead to customize the policy + // definition. + PolicyFamilyId string `json:"policy_family_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePolicyResponse struct { + // Canonical unique identifier for the cluster policy. + PolicyId string `json:"policy_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePolicyResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePolicyResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateResponse struct { + // The global init script ID. 
+ ScriptId string `json:"script_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Created struct { + Id string `json:"id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Created) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Created) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DataPlaneEventDetails struct { + // + EventType DataPlaneEventDetailsEventType `json:"event_type,omitempty"` + // + ExecutorFailures int `json:"executor_failures,omitempty"` + // + HostId string `json:"host_id,omitempty"` + // + Timestamp int64 `json:"timestamp,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DataPlaneEventDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DataPlaneEventDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// +type DataPlaneEventDetailsEventType string + +const DataPlaneEventDetailsEventTypeNodeBlacklisted DataPlaneEventDetailsEventType = `NODE_BLACKLISTED` + +const DataPlaneEventDetailsEventTypeNodeExcludedDecommissioned DataPlaneEventDetailsEventType = `NODE_EXCLUDED_DECOMMISSIONED` + +// String representation for [fmt.Print] +func (f *DataPlaneEventDetailsEventType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataPlaneEventDetailsEventType) Set(v string) error { + switch v { + case `NODE_BLACKLISTED`, `NODE_EXCLUDED_DECOMMISSIONED`: + *f = DataPlaneEventDetailsEventType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED"`, v) + } +} + +// Type always returns DataPlaneEventDetailsEventType to satisfy [pflag.Value] interface +func (f *DataPlaneEventDetailsEventType) Type() string { + return "DataPlaneEventDetailsEventType" +} + +// Data security mode decides what data governance model to use when accessing +// data from a cluster. +// +// The following modes can only be used with `kind`. * +// `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access +// mode depending on your compute configuration. * +// `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * +// `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. +// +// The following modes can be used regardless of `kind`. * `NONE`: No security +// isolation for multiple users sharing the cluster. Data governance features +// are not available in this mode. * `SINGLE_USER`: A secure cluster that can +// only be exclusively used by a single user specified in `single_user_name`. +// Most programming languages, cluster features and data governance features are +// available in this mode. * `USER_ISOLATION`: A secure cluster that can be +// shared by multiple users. Cluster users are fully isolated so that they +// cannot see each other's data and credentials. Most data governance features +// are supported in this mode. But programming languages and cluster features +// might be limited. +// +// The following modes are deprecated starting with Databricks Runtime 15.0 and +// will be removed for future Databricks Runtime versions: +// +// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL +// clusters. 
* `LEGACY_PASSTHROUGH`: This mode is for users migrating from
+// legacy Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This
+// mode is for users migrating from legacy Passthrough on standard clusters. *
+// `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that has neither
+// UC nor passthrough enabled.
+type DataSecurityMode string
+
+// Databricks will choose the most appropriate access mode depending on your
+// compute configuration.
+const DataSecurityModeDataSecurityModeAuto DataSecurityMode = `DATA_SECURITY_MODE_AUTO`
+
+// Alias for `SINGLE_USER`.
+const DataSecurityModeDataSecurityModeDedicated DataSecurityMode = `DATA_SECURITY_MODE_DEDICATED`
+
+// Alias for `USER_ISOLATION`.
+const DataSecurityModeDataSecurityModeStandard DataSecurityMode = `DATA_SECURITY_MODE_STANDARD`
+
+// This mode is for users migrating from legacy Passthrough on high concurrency
+// clusters.
+const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH`
+
+// This mode is for users migrating from legacy Passthrough on standard
+// clusters.
+const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER`
+
+// This mode provides a way that has neither UC nor passthrough enabled.
+const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD`
+
+// This mode is for users migrating from legacy Table ACL clusters.
+const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL`
+
+// No security isolation for multiple users sharing the cluster. Data governance
+// features are not available in this mode.
+const DataSecurityModeNone DataSecurityMode = `NONE`
+
+// A secure cluster that can only be exclusively used by a single user specified
+// in `single_user_name`. Most programming languages, cluster features and data
+// governance features are available in this mode.
+const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER`
+
+// A secure cluster that can be shared by multiple users. Cluster users are
+// fully isolated so that they cannot see each other's data and credentials.
+// Most data governance features are supported in this mode. But programming
+// languages and cluster features might be limited.
+const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION`
+
+// String representation for [fmt.Print]
+func (f *DataSecurityMode) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *DataSecurityMode) Set(v string) error {
+ switch v {
+ case `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, `DATA_SECURITY_MODE_STANDARD`, `LEGACY_PASSTHROUGH`, `LEGACY_SINGLE_USER`, `LEGACY_SINGLE_USER_STANDARD`, `LEGACY_TABLE_ACL`, `NONE`, `SINGLE_USER`, `USER_ISOLATION`:
+ *f = DataSecurityMode(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DATA_SECURITY_MODE_AUTO", "DATA_SECURITY_MODE_DEDICATED", "DATA_SECURITY_MODE_STANDARD", "LEGACY_PASSTHROUGH", "LEGACY_SINGLE_USER", "LEGACY_SINGLE_USER_STANDARD", "LEGACY_TABLE_ACL", "NONE", "SINGLE_USER", "USER_ISOLATION"`, v)
+ }
+}
+
+// Type always returns DataSecurityMode to satisfy [pflag.Value] interface
+func (f *DataSecurityMode) Type() string {
+ return "DataSecurityMode"
+}
+
+type DbfsStorageInfo struct {
+ // dbfs destination, e.g. `dbfs:/my/path`
+ Destination string `json:"destination"`
+}
+
+type DeleteCluster struct {
+ // The cluster to be terminated.
+ ClusterId string `json:"cluster_id"` +} + +type DeleteClusterResponse struct { +} + +// Delete init script +type DeleteGlobalInitScriptRequest struct { + // The ID of the global init script. + ScriptId string `json:"-" url:"-"` +} + +type DeleteInstancePool struct { + // The instance pool to be terminated. + InstancePoolId string `json:"instance_pool_id"` +} + +type DeleteInstancePoolResponse struct { +} + +type DeletePolicy struct { + // The ID of the policy to delete. + PolicyId string `json:"policy_id"` +} + +type DeletePolicyResponse struct { +} + +type DeleteResponse struct { +} + +type DestroyContext struct { + ClusterId string `json:"clusterId"` + + ContextId string `json:"contextId"` +} + +type DestroyResponse struct { +} + +type DiskSpec struct { + // The number of disks launched for each instance: - This feature is only + // enabled for supported node types. - Users can choose up to the limit of + // the disks supported by the node type. - For node types with no OS disk, + // at least one disk must be specified; otherwise, cluster creation will + // fail. + // + // If disks are attached, Databricks will configure Spark to use only the + // disks for scratch storage, because heterogenously sized scratch devices + // can lead to inefficient disk utilization. If no disks are attached, + // Databricks will configure Spark to use instance store disks. + // + // Note: If disks are specified, then the Spark configuration + // `spark.local.dir` will be overridden. + // + // Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For + // Azure: `/remote_volume0`, `/remote_volume1`, and etc. + DiskCount int `json:"disk_count,omitempty"` + + DiskIops int `json:"disk_iops,omitempty"` + // The size of each disk (in GiB) launched for each instance. Values must + // fall into the supported range for a particular instance type. + // + // For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized + // HDD: 500 - 4096 GiB + // + // For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- + // 1023 GiB + DiskSize int `json:"disk_size,omitempty"` + + DiskThroughput int `json:"disk_throughput,omitempty"` + // The type of disks that will be launched with this cluster. 
+ DiskType *DiskType `json:"disk_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DiskSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DiskSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DiskType struct { + AzureDiskVolumeType DiskTypeAzureDiskVolumeType `json:"azure_disk_volume_type,omitempty"` + + EbsVolumeType DiskTypeEbsVolumeType `json:"ebs_volume_type,omitempty"` +} + +type DiskTypeAzureDiskVolumeType string + +const DiskTypeAzureDiskVolumeTypePremiumLrs DiskTypeAzureDiskVolumeType = `PREMIUM_LRS` + +const DiskTypeAzureDiskVolumeTypeStandardLrs DiskTypeAzureDiskVolumeType = `STANDARD_LRS` + +// String representation for [fmt.Print] +func (f *DiskTypeAzureDiskVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DiskTypeAzureDiskVolumeType) Set(v string) error { + switch v { + case `PREMIUM_LRS`, `STANDARD_LRS`: + *f = DiskTypeAzureDiskVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PREMIUM_LRS", "STANDARD_LRS"`, v) + } +} + +// Type always returns DiskTypeAzureDiskVolumeType to satisfy [pflag.Value] interface +func (f *DiskTypeAzureDiskVolumeType) Type() string { + return "DiskTypeAzureDiskVolumeType" +} + +type DiskTypeEbsVolumeType string + +const DiskTypeEbsVolumeTypeGeneralPurposeSsd DiskTypeEbsVolumeType = `GENERAL_PURPOSE_SSD` + +const DiskTypeEbsVolumeTypeThroughputOptimizedHdd DiskTypeEbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *DiskTypeEbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DiskTypeEbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = DiskTypeEbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns DiskTypeEbsVolumeType to satisfy [pflag.Value] interface +func (f *DiskTypeEbsVolumeType) Type() string { + return "DiskTypeEbsVolumeType" +} + +type DockerBasicAuth struct { + // Password of the user + Password string `json:"password,omitempty"` + // Name of the user + Username string `json:"username,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerBasicAuth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DockerImage struct { + BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` + // URL of the docker image. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerImage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerImage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of EBS volumes that will be launched with this cluster. 
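+//
+// A construction sketch (not generated code) for the DiskSpec/DiskType pair
+// defined above; the values are illustrative and must respect the documented
+// per-cloud ranges:
+//
+//    spec := DiskSpec{
+//        DiskCount: 2,
+//        DiskSize:  100, // GiB; 100 - 4096 for AWS General Purpose SSD
+//        DiskType: &DiskType{
+//            EbsVolumeType: DiskTypeEbsVolumeTypeGeneralPurposeSsd,
+//        },
+//    }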
+type EbsVolumeType string + +const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD` + +const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *EbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = EbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns EbsVolumeType to satisfy [pflag.Value] interface +func (f *EbsVolumeType) Type() string { + return "EbsVolumeType" +} + +type EditCluster struct { + // When set to true, fixed and default values from the policy will be used + // for fields that are omitted. When set to false, only fixed values from + // the policy will be applied. + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // ID of the cluster + ClusterId string `json:"cluster_id"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // The following modes can only be used with `kind`. 
*
+ // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate
+ // access mode depending on your compute configuration. *
+ // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. *
+ // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
+ //
+ // The following modes can be used regardless of `kind`. * `NONE`: No
+ // security isolation for multiple users sharing the cluster. Data
+ // governance features are not available in this mode. * `SINGLE_USER`: A
+ // secure cluster that can only be exclusively used by a single user
+ // specified in `single_user_name`. Most programming languages, cluster
+ // features and data governance features are available in this mode. *
+ // `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
+ // Cluster users are fully isolated so that they cannot see each other's
+ // data and credentials. Most data governance features are supported in this
+ // mode. But programming languages and cluster features might be limited.
+ //
+ // The following modes are deprecated starting with Databricks Runtime 15.0
+ // and will be removed for future Databricks Runtime versions:
+ //
+ // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
+ // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
+ // from legacy Passthrough on high concurrency clusters. *
+ // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+ // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
+ // mode provides a way that has neither UC nor passthrough enabled.
+ DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
+
+ DockerImage *DockerImage `json:"docker_image,omitempty"`
+ // The optional ID of the instance pool to which the driver of the cluster
+ // belongs. The cluster uses the instance pool with id (instance_pool_id)
+ // for the driver if the driver pool is not assigned.
+ DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
+ // The node type of the Spark driver. Note that this field is optional; if
+ // unset, the driver node type will be set as the same value as
+ // `node_type_id` defined above.
+ DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
+ // Autoscaling Local Storage: when enabled, this cluster will dynamically
+ // acquire additional disk space when its Spark workers are running low on
+ // disk space. This feature requires specific AWS permissions to function
+ // correctly - refer to the User Guide for more details.
+ EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+ // Whether to enable LUKS on cluster VMs' local disks
+ EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
+ // Attributes related to clusters running on Google Cloud Platform. If not
+ // specified at cluster creation, a set of default values will be used.
+ GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
+ // The configuration for storing init scripts. Any number of destinations
+ // can be specified. The scripts are executed sequentially in the order
+ // provided. If `cluster_log_conf` is specified, init script logs are sent
+ // to `<destination>/<cluster-ID>/init_scripts`.
+ InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
+ // The optional ID of the instance pool to which the cluster belongs.
+ InstancePoolId string `json:"instance_pool_id,omitempty"`
+ // This field can only be used with `kind`.
+ //
+ // When set to true, Databricks will automatically set single node related
+ // `custom_tags`, `spark_conf`, and `num_workers`.
+ IsSingleNode bool `json:"is_single_node,omitempty"`
+ // The kind of compute described by this compute specification.
+ //
+ // Depending on `kind`, different validations and default values will be
+ // applied.
+ //
+ // The first usage of this value is for the simple cluster form where it
+ // sets `kind = CLASSIC_PREVIEW`.
+ Kind Kind `json:"kind,omitempty"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster. For example, the Spark nodes can
+ // be provisioned and optimized for memory or compute intensive workloads. A
+ // list of available node types can be retrieved by using the
+ // :method:clusters/listNodeTypes API call.
+ NodeTypeId string `json:"node_type_id,omitempty"`
+ // Number of worker nodes that this cluster should have. A cluster has one
+ // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
+ // Spark nodes.
+ //
+ // Note: When reading the properties of a cluster, this field reflects the
+ // desired number of workers rather than the actual current number of
+ // workers. For instance, if a cluster is resized from 5 to 10 workers, this
+ // field will immediately be updated to reflect the target size of 10
+ // workers, whereas the workers listed in `spark_info` will gradually
+ // increase from 5 to 10 as the new nodes are provisioned.
+ NumWorkers int `json:"num_workers,omitempty"`
+ // The ID of the cluster policy used to create the cluster if applicable.
+ PolicyId string `json:"policy_id,omitempty"`
+ // Determines the cluster's runtime engine, either standard or Photon.
+ //
+ // This field is not compatible with legacy `spark_version` values that
+ // contain `-photon-`. Remove `-photon-` from the `spark_version` and set
+ // `runtime_engine` to `PHOTON`.
+ //
+ // If left unspecified, the runtime engine defaults to standard unless the
+ // spark_version contains -photon-, in which case Photon will be used.
+ RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"`
+ // Single user name if data_security_mode is `SINGLE_USER`
+ SingleUserName string `json:"single_user_name,omitempty"`
+ // An object containing a set of optional, user-specified Spark
+ // configuration key-value pairs. Users can also pass in a string of extra
+ // JVM options to the driver and the executors via
+ // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
+ // respectively.
+ SparkConf map[string]string `json:"spark_conf,omitempty"`
+ // An object containing a set of optional, user-specified environment
+ // variable key-value pairs. Please note that a key-value pair of the form
+ // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
+ // driver and workers.
+ //
+ // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
+ // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
+ // example below. This ensures that all default Databricks-managed
+ // environment variables are included as well.
+ //
+ // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
+ // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
+ // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
+ SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"`
+ // The Spark version of the cluster, e.g. `3.3.x-scala2.11`.
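+ //
+ // Aside: a sketch of the runtime_engine migration described above, splitting
+ // a legacy Photon spark_version into a plain version plus an explicit
+ // engine. The RuntimeEnginePhoton constant is assumed to be generated for
+ // this enum; the ID and versions are illustrative:
+ //
+ //    edit := EditCluster{
+ //        ClusterId:     "1234-567890-abcde123", // hypothetical
+ //        SparkVersion:  "13.3.x-scala2.12",     // was "13.3.x-photon-scala2.12"
+ //        RuntimeEngine: RuntimeEnginePhoton,    // assumed constant for `PHOTON`
+ //    }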
+ // A list of available Spark versions can be retrieved by using the
+ // :method:clusters/sparkVersions API call.
+ SparkVersion string `json:"spark_version"`
+ // SSH public key contents that will be added to each Spark node in this
+ // cluster. The corresponding private keys can be used to log in with the
+ // user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
+ SshPublicKeys []string `json:"ssh_public_keys,omitempty"`
+ // This field can only be used with `kind`.
+ //
+ // `effective_spark_version` is determined by `spark_version` (DBR release),
+ // this field `use_ml_runtime`, and whether `node_type_id` is a GPU node or
+ // not.
+ UseMlRuntime bool `json:"use_ml_runtime,omitempty"`
+
+ WorkloadType *WorkloadType `json:"workload_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *EditCluster) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s EditCluster) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type EditClusterResponse struct {
+}
+
+type EditInstancePool struct {
+ // Additional tags for pool resources. Databricks will tag all pool
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Automatically terminates the extra instances in the pool cache after they
+ // are inactive for this time in minutes if min_idle_instances requirement
+ // is already met. If not set, the extra pool instances will be
+ // automatically terminated after a default timeout. If specified, the
+ // threshold must be between 0 and 10000 minutes. Users can also set this
+ // value to 0 to instantly remove idle instances from the cache as long as
+ // the min cache size constraint is still satisfied.
+ IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
+ // Instance pool ID
+ InstancePoolId string `json:"instance_pool_id"`
+ // Pool name requested by the user. Pool name must be unique. Length must be
+ // between 1 and 100 characters.
+ InstancePoolName string `json:"instance_pool_name"`
+ // Maximum number of outstanding instances to keep in the pool, including
+ // both instances used by clusters and idle instances. Clusters that require
+ // further instance provisioning will fail during upsize requests.
+ MaxCapacity int `json:"max_capacity,omitempty"`
+ // Minimum number of idle instances to keep in the instance pool
+ MinIdleInstances int `json:"min_idle_instances,omitempty"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster. For example, the Spark nodes can
+ // be provisioned and optimized for memory or compute intensive workloads. A
+ // list of available node types can be retrieved by using the
+ // :method:clusters/listNodeTypes API call.
+ NodeTypeId string `json:"node_type_id"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *EditInstancePool) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s EditInstancePool) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type EditInstancePoolResponse struct {
+}
+
+type EditPolicy struct {
+ // Policy definition document expressed in [Databricks Cluster Policy
+ // Definition Language].
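+ //
+ // Aside (a sketch): these structs share the package's ForceSendFields
+ // convention: a zero value in an `omitempty` field is only serialized when
+ // its Go field name is listed explicitly. The ID and names are hypothetical:
+ //
+ //    edit := EditInstancePool{
+ //        InstancePoolId:   "0123-456789-pool12",
+ //        InstancePoolName: "batch-pool",
+ //        NodeTypeId:       "i3.xlarge",
+ //        MinIdleInstances: 0, // zero value: dropped by MarshalJSON...
+ //        ForceSendFields:  []string{"MinIdleInstances"}, // ...unless forced
+ //    }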
+ // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition string `json:"definition,omitempty"` + // Additional human-readable description of the cluster policy. + Description string `json:"description,omitempty"` + // A list of libraries to be installed on the next cluster restart that uses + // this policy. The maximum number of libraries is 500. + Libraries []Library `json:"libraries,omitempty"` + // Max number of clusters per user that can be active using this policy. If + // not present, there is no max limit. + MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"` + // Cluster Policy name requested by the user. This has to be unique. Length + // must be between 1 and 100 characters. + Name string `json:"name,omitempty"` + // Policy definition JSON document expressed in [Databricks Policy + // Definition Language]. The JSON document must be passed as a string and + // cannot be embedded in the requests. + // + // You can use this to customize the policy definition inherited from the + // policy family. Policy rules specified here are merged into the inherited + // policy definition. + // + // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"` + // ID of the policy family. The cluster policy's policy definition inherits + // the policy family's policy definition. + // + // Cannot be used with `definition`. Use + // `policy_family_definition_overrides` instead to customize the policy + // definition. + PolicyFamilyId string `json:"policy_family_id,omitempty"` + // The ID of the policy to update. + PolicyId string `json:"policy_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *EditPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EditPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EditPolicyResponse struct { +} + +type EditResponse struct { +} + +type EnforceClusterComplianceRequest struct { + // The ID of the cluster you want to enforce policy compliance on. + ClusterId string `json:"cluster_id"` + // If set, previews the changes that would be made to a cluster to enforce + // compliance but does not update the cluster. + ValidateOnly bool `json:"validate_only,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnforceClusterComplianceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnforceClusterComplianceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnforceClusterComplianceResponse struct { + // A list of changes that have been made to the cluster settings for the + // cluster to become compliant with its policy. + Changes []ClusterSettingsChange `json:"changes,omitempty"` + // Whether any changes have been made to the cluster settings for the + // cluster to become compliant with its policy. + HasChanges bool `json:"has_changes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnforceClusterComplianceResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnforceClusterComplianceResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EventDetails struct { + // * For created clusters, the attributes of the cluster. * For edited + // clusters, the new attributes of the cluster. 
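+ //
+ // Aside (a sketch): a dry-run compliance check with the request/response
+ // pair above; ValidateOnly previews the changes without applying them.
+ // The cluster ID is hypothetical:
+ //
+ //    req := EnforceClusterComplianceRequest{
+ //        ClusterId:    "1234-567890-abcde123",
+ //        ValidateOnly: true,
+ //    }
+ //    // In the response, HasChanges reports whether the listed Changes
+ //    // would be applied to reach compliance.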
+ Attributes *ClusterAttributes `json:"attributes,omitempty"` + // The cause of a change in target size. + Cause EventDetailsCause `json:"cause,omitempty"` + // The actual cluster size that was set in the cluster creation or edit. + ClusterSize *ClusterSize `json:"cluster_size,omitempty"` + // The current number of vCPUs in the cluster. + CurrentNumVcpus int `json:"current_num_vcpus,omitempty"` + // The current number of nodes in the cluster. + CurrentNumWorkers int `json:"current_num_workers,omitempty"` + // + DidNotExpandReason string `json:"did_not_expand_reason,omitempty"` + // Current disk size in bytes + DiskSize int64 `json:"disk_size,omitempty"` + // More details about the change in driver's state + DriverStateMessage string `json:"driver_state_message,omitempty"` + // Whether or not a blocklisted node should be terminated. For + // ClusterEventType NODE_BLACKLISTED. + EnableTerminationForNodeBlocklisted bool `json:"enable_termination_for_node_blocklisted,omitempty"` + // + FreeSpace int64 `json:"free_space,omitempty"` + // List of global and cluster init scripts associated with this cluster + // event. + InitScripts *InitScriptEventDetails `json:"init_scripts,omitempty"` + // Instance Id where the event originated from + InstanceId string `json:"instance_id,omitempty"` + // Unique identifier of the specific job run associated with this cluster + // event * For clusters created for jobs, this will be the same as the + // cluster name + JobRunName string `json:"job_run_name,omitempty"` + // The cluster attributes before a cluster was edited. + PreviousAttributes *ClusterAttributes `json:"previous_attributes,omitempty"` + // The size of the cluster before an edit or resize. + PreviousClusterSize *ClusterSize `json:"previous_cluster_size,omitempty"` + // Previous disk size in bytes + PreviousDiskSize int64 `json:"previous_disk_size,omitempty"` + // A termination reason: * On a TERMINATED event, this is the reason of the + // termination. * On a RESIZE_COMPLETE event, this indicates the reason that + // we failed to acquire some nodes. + Reason *TerminationReason `json:"reason,omitempty"` + // The targeted number of vCPUs in the cluster. + TargetNumVcpus int `json:"target_num_vcpus,omitempty"` + // The targeted number of nodes in the cluster. + TargetNumWorkers int `json:"target_num_workers,omitempty"` + // The user that caused the event to occur. (Empty if it was done by the + // control plane.) + User string `json:"user,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EventDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EventDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The cause of a change in target size. 
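+//
+// Reading a resize from the EventDetails fields above (a sketch; `d` is a
+// hypothetical *EventDetails taken from a cluster event): target counts move
+// immediately, while current counts converge as nodes are provisioned.
+//
+//    if d.TargetNumWorkers != d.CurrentNumWorkers {
+//        // resize still in progress; d.Cause records who requested it,
+//        // e.g. EventDetailsCauseAutoscale
+//    }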
+type EventDetailsCause string + +const EventDetailsCauseAutorecovery EventDetailsCause = `AUTORECOVERY` + +const EventDetailsCauseAutoscale EventDetailsCause = `AUTOSCALE` + +const EventDetailsCauseReplaceBadNodes EventDetailsCause = `REPLACE_BAD_NODES` + +const EventDetailsCauseUserRequest EventDetailsCause = `USER_REQUEST` + +// String representation for [fmt.Print] +func (f *EventDetailsCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventDetailsCause) Set(v string) error { + switch v { + case `AUTORECOVERY`, `AUTOSCALE`, `REPLACE_BAD_NODES`, `USER_REQUEST`: + *f = EventDetailsCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) + } +} + +// Type always returns EventDetailsCause to satisfy [pflag.Value] interface +func (f *EventDetailsCause) Type() string { + return "EventDetailsCause" +} + +type EventType string + +const EventTypeAddNodesFailed EventType = `ADD_NODES_FAILED` + +const EventTypeAutomaticClusterUpdate EventType = `AUTOMATIC_CLUSTER_UPDATE` + +const EventTypeAutoscalingBackoff EventType = `AUTOSCALING_BACKOFF` + +const EventTypeAutoscalingFailed EventType = `AUTOSCALING_FAILED` + +const EventTypeAutoscalingStatsReport EventType = `AUTOSCALING_STATS_REPORT` + +const EventTypeCreating EventType = `CREATING` + +const EventTypeDbfsDown EventType = `DBFS_DOWN` + +const EventTypeDidNotExpandDisk EventType = `DID_NOT_EXPAND_DISK` + +const EventTypeDriverHealthy EventType = `DRIVER_HEALTHY` + +const EventTypeDriverNotResponding EventType = `DRIVER_NOT_RESPONDING` + +const EventTypeDriverUnavailable EventType = `DRIVER_UNAVAILABLE` + +const EventTypeEdited EventType = `EDITED` + +const EventTypeExpandedDisk EventType = `EXPANDED_DISK` + +const EventTypeFailedToExpandDisk EventType = `FAILED_TO_EXPAND_DISK` + +const EventTypeInitScriptsFinished EventType = `INIT_SCRIPTS_FINISHED` + +const EventTypeInitScriptsStarted EventType = `INIT_SCRIPTS_STARTED` + +const EventTypeMetastoreDown EventType = `METASTORE_DOWN` + +const EventTypeNodesLost EventType = `NODES_LOST` + +const EventTypeNodeBlacklisted EventType = `NODE_BLACKLISTED` + +const EventTypeNodeExcludedDecommissioned EventType = `NODE_EXCLUDED_DECOMMISSIONED` + +const EventTypePinned EventType = `PINNED` + +const EventTypeResizing EventType = `RESIZING` + +const EventTypeRestarting EventType = `RESTARTING` + +const EventTypeRunning EventType = `RUNNING` + +const EventTypeSparkException EventType = `SPARK_EXCEPTION` + +const EventTypeStarting EventType = `STARTING` + +const EventTypeTerminating EventType = `TERMINATING` + +const EventTypeUnpinned EventType = `UNPINNED` + +const EventTypeUpsizeCompleted EventType = `UPSIZE_COMPLETED` + +// String representation for [fmt.Print] +func (f *EventType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventType) Set(v string) error { + switch v { + case `ADD_NODES_FAILED`, `AUTOMATIC_CLUSTER_UPDATE`, `AUTOSCALING_BACKOFF`, `AUTOSCALING_FAILED`, `AUTOSCALING_STATS_REPORT`, `CREATING`, `DBFS_DOWN`, `DID_NOT_EXPAND_DISK`, `DRIVER_HEALTHY`, `DRIVER_NOT_RESPONDING`, `DRIVER_UNAVAILABLE`, `EDITED`, `EXPANDED_DISK`, `FAILED_TO_EXPAND_DISK`, `INIT_SCRIPTS_FINISHED`, `INIT_SCRIPTS_STARTED`, `METASTORE_DOWN`, `NODES_LOST`, `NODE_BLACKLISTED`, `NODE_EXCLUDED_DECOMMISSIONED`, `PINNED`, `RESIZING`, `RESTARTING`, `RUNNING`, `SPARK_EXCEPTION`, `STARTING`, 
`TERMINATING`, `UNPINNED`, `UPSIZE_COMPLETED`:
+ *f = EventType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ADD_NODES_FAILED", "AUTOMATIC_CLUSTER_UPDATE", "AUTOSCALING_BACKOFF", "AUTOSCALING_FAILED", "AUTOSCALING_STATS_REPORT", "CREATING", "DBFS_DOWN", "DID_NOT_EXPAND_DISK", "DRIVER_HEALTHY", "DRIVER_NOT_RESPONDING", "DRIVER_UNAVAILABLE", "EDITED", "EXPANDED_DISK", "FAILED_TO_EXPAND_DISK", "INIT_SCRIPTS_FINISHED", "INIT_SCRIPTS_STARTED", "METASTORE_DOWN", "NODES_LOST", "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED", "PINNED", "RESIZING", "RESTARTING", "RUNNING", "SPARK_EXCEPTION", "STARTING", "TERMINATING", "UNPINNED", "UPSIZE_COMPLETED"`, v)
+ }
+}
+
+// Type always returns EventType to satisfy [pflag.Value] interface
+func (f *EventType) Type() string {
+ return "EventType"
+}
+
+type GcpAttributes struct {
+ // This field determines whether the instance pool will contain preemptible
+ // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs
+ // if the former is unavailable.
+ Availability GcpAvailability `json:"availability,omitempty"`
+ // boot disk size in GB
+ BootDiskSize int `json:"boot_disk_size,omitempty"`
+ // If provided, the cluster will impersonate the Google service account when
+ // accessing gcloud services (like GCS). The Google service account must
+ // have previously been added to the Databricks environment by an account
+ // administrator.
+ GoogleServiceAccount string `json:"google_service_account,omitempty"`
+ // If provided, each node (workers and driver) in the cluster will have this
+ // number of local SSDs attached. Each local SSD is 375GB in size. Refer to
+ // [GCP documentation] for the supported number of local SSDs for each
+ // instance type.
+ //
+ // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
+ LocalSsdCount int `json:"local_ssd_count,omitempty"`
+ // This field determines whether the spark executors will be scheduled to
+ // run on preemptible VMs (when set to true) versus standard compute engine
+ // VMs (when set to false; default). Note: Soon to be deprecated, use the
+ // availability field instead.
+ UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"`
+ // Identifier for the availability zone in which the cluster resides. This
+ // can be one of the following: - "HA" => High availability, spread nodes
+ // across availability zones for a Databricks deployment region [default] -
+ // "AUTO" => Databricks picks an availability zone to schedule the cluster
+ // on. - A GCP availability zone => Pick one of the available zones for
+ // (machine type + region) from
+ // https://cloud.google.com/compute/docs/regions-zones.
+ ZoneId string `json:"zone_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GcpAttributes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GcpAttributes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// This field determines whether the instance pool will contain preemptible VMs,
+// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the
+// former is unavailable.
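+//
+// A GcpAttributes sketch following the zone_id semantics above; all values
+// are illustrative:
+//
+//    gcp := GcpAttributes{
+//        Availability:  GcpAvailabilityPreemptibleWithFallbackGcp,
+//        ZoneId:        "AUTO", // let Databricks pick the zone
+//        LocalSsdCount: 2,      // two 375GB local SSDs per node
+//    }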
+type GcpAvailability string
+
+const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP`
+
+const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP`
+
+const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP`
+
+// String representation for [fmt.Print]
+func (f *GcpAvailability) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *GcpAvailability) Set(v string) error {
+ switch v {
+ case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`:
+ *f = GcpAvailability(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v)
+ }
+}
+
+// Type always returns GcpAvailability to satisfy [pflag.Value] interface
+func (f *GcpAvailability) Type() string {
+ return "GcpAvailability"
+}
+
+type GcsStorageInfo struct {
+ // GCS destination/URI, e.g. `gs://my-bucket/some-prefix`
+ Destination string `json:"destination"`
+}
+
+// Get cluster policy compliance
+type GetClusterComplianceRequest struct {
+ // The ID of the cluster to get the compliance status of.
+ ClusterId string `json:"-" url:"cluster_id"`
+}
+
+type GetClusterComplianceResponse struct {
+ // Whether the cluster is compliant with its policy or not. Clusters could
+ // be out of compliance if the policy was updated after the cluster was last
+ // edited.
+ IsCompliant bool `json:"is_compliant,omitempty"`
+ // An object containing key-value mappings representing the first 200 policy
+ // validation errors. The keys indicate the path where the policy validation
+ // error is occurring. The values indicate an error message describing the
+ // policy validation error.
+ Violations map[string]string `json:"violations,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetClusterComplianceResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetClusterComplianceResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get cluster permission levels
+type GetClusterPermissionLevelsRequest struct {
+ // The cluster for which to get or manage permissions.
+ ClusterId string `json:"-" url:"-"`
+}
+
+type GetClusterPermissionLevelsResponse struct {
+ // Specific permission levels
+ PermissionLevels []ClusterPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get cluster permissions
+type GetClusterPermissionsRequest struct {
+ // The cluster for which to get or manage permissions.
+ ClusterId string `json:"-" url:"-"`
+}
+
+// Get cluster policy permission levels
+type GetClusterPolicyPermissionLevelsRequest struct {
+ // The cluster policy for which to get or manage permissions.
+ ClusterPolicyId string `json:"-" url:"-"`
+}
+
+type GetClusterPolicyPermissionLevelsResponse struct {
+ // Specific permission levels
+ PermissionLevels []ClusterPolicyPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get cluster policy permissions
+type GetClusterPolicyPermissionsRequest struct {
+ // The cluster policy for which to get or manage permissions.
+ ClusterPolicyId string `json:"-" url:"-"`
+}
+
+// Get a cluster policy
+type GetClusterPolicyRequest struct {
+ // Canonical unique identifier for the Cluster Policy.
+ PolicyId string `json:"-" url:"policy_id"`
+}
+
+// Get cluster info
+type GetClusterRequest struct {
+ // The cluster about which to retrieve information.
+ ClusterId string `json:"-" url:"cluster_id"` +} + +type GetEvents struct { + // The ID of the cluster to retrieve events about. + ClusterId string `json:"cluster_id"` + // The end time in epoch milliseconds. If empty, returns events up to the + // current time. + EndTime int64 `json:"end_time,omitempty"` + // An optional set of event types to filter on. If empty, all event types + // are returned. + EventTypes []EventType `json:"event_types,omitempty"` + // The maximum number of events to include in a page of events. Defaults to + // 50, and maximum allowed value is 500. + Limit int64 `json:"limit,omitempty"` + // The offset in the result set. Defaults to 0 (no offset). When an offset + // is specified and the results are requested in descending order, the + // end_time field is required. + Offset int64 `json:"offset,omitempty"` + // The order to list events in; either "ASC" or "DESC". Defaults to "DESC". + Order GetEventsOrder `json:"order,omitempty"` + // The start time in epoch milliseconds. If empty, returns events starting + // from the beginning of time. + StartTime int64 `json:"start_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetEvents) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetEvents) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The order to list events in; either "ASC" or "DESC". Defaults to "DESC". +type GetEventsOrder string + +const GetEventsOrderAsc GetEventsOrder = `ASC` + +const GetEventsOrderDesc GetEventsOrder = `DESC` + +// String representation for [fmt.Print] +func (f *GetEventsOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetEventsOrder) Set(v string) error { + switch v { + case `ASC`, `DESC`: + *f = GetEventsOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ASC", "DESC"`, v) + } +} + +// Type always returns GetEventsOrder to satisfy [pflag.Value] interface +func (f *GetEventsOrder) Type() string { + return "GetEventsOrder" +} + +type GetEventsResponse struct { + // + Events []ClusterEvent `json:"events,omitempty"` + // The parameters required to retrieve the next page of events. Omitted if + // there are no more events to read. + NextPage *GetEvents `json:"next_page,omitempty"` + // The total number of events filtered by the start_time, end_time, and + // event_types. + TotalCount int64 `json:"total_count,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetEventsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetEventsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get an init script +type GetGlobalInitScriptRequest struct { + // The ID of the global init script. + ScriptId string `json:"-" url:"-"` +} + +type GetInstancePool struct { + // Attributes related to instance pools running on Amazon Web Services. If + // not specified at pool creation, a set of default values will be used. + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to instance pools running on Azure. If not specified + // at pool creation, a set of default values will be used. + AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"` + // Additional tags for pool resources. Databricks will tag all pool + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. 
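+ //
+ // Aside (a sketch): paging through cluster events with the GetEvents and
+ // GetEventsResponse types above. NextPage carries the exact request for the
+ // following page and is omitted on the last page; listEvents is a
+ // placeholder for the actual events API call, and the ID is hypothetical:
+ //
+ //    req := &GetEvents{ClusterId: "1234-567890-abcde123", Limit: 500}
+ //    for req != nil {
+ //        resp := listEvents(req)
+ //        // ... consume resp.Events ...
+ //        req = resp.NextPage
+ //    }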
+ // Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Tags that are added by Databricks regardless of any `custom_tags`,
+ // including:
+ //
+ // - Vendor: Databricks
+ //
+ // - InstancePoolCreator:
+ //
+ // - InstancePoolName:
+ //
+ // - InstancePoolId:
+ DefaultTags map[string]string `json:"default_tags,omitempty"`
+ // Defines the specification of the disks that will be attached to all spark
+ // containers.
+ DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
+ // Autoscaling Local Storage: when enabled, the instances in this pool will
+ // dynamically acquire additional disk space when their Spark workers are
+ // running low on disk space. In AWS, this feature requires specific AWS
+ // permissions to function correctly - refer to the User Guide for more
+ // details.
+ EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+ // Attributes related to instance pools running on Google Cloud Platform. If
+ // not specified at pool creation, a set of default values will be used.
+ GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"`
+ // Automatically terminates the extra instances in the pool cache after they
+ // are inactive for this time in minutes if min_idle_instances requirement
+ // is already met. If not set, the extra pool instances will be
+ // automatically terminated after a default timeout. If specified, the
+ // threshold must be between 0 and 10000 minutes. Users can also set this
+ // value to 0 to instantly remove idle instances from the cache as long as
+ // the min cache size constraint is still satisfied.
+ IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
+ // Canonical unique identifier for the pool.
+ InstancePoolId string `json:"instance_pool_id"`
+ // Pool name requested by the user. Pool name must be unique. Length must be
+ // between 1 and 100 characters.
+ InstancePoolName string `json:"instance_pool_name,omitempty"`
+ // Maximum number of outstanding instances to keep in the pool, including
+ // both instances used by clusters and idle instances. Clusters that require
+ // further instance provisioning will fail during upsize requests.
+ MaxCapacity int `json:"max_capacity,omitempty"`
+ // Minimum number of idle instances to keep in the instance pool
+ MinIdleInstances int `json:"min_idle_instances,omitempty"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster. For example, the Spark nodes can
+ // be provisioned and optimized for memory or compute intensive workloads. A
+ // list of available node types can be retrieved by using the
+ // :method:clusters/listNodeTypes API call.
+ NodeTypeId string `json:"node_type_id,omitempty"`
+ // Custom Docker Image BYOC
+ PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"`
+ // A list containing at most one preloaded Spark image version for the pool.
+ // Pool-backed clusters started with the preloaded Spark version will start
+ // faster. A list of available Spark versions can be retrieved by using the
+ // :method:clusters/sparkVersions API call.
+ PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`
+ // Current state of the instance pool.
+ State InstancePoolState `json:"state,omitempty"`
+ // Usage statistics about the instance pool.
+ Stats *InstancePoolStats `json:"stats,omitempty"`
+ // Status of failed pending instances in the pool.
+ Status *InstancePoolStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetInstancePool) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetInstancePool) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get instance pool permission levels +type GetInstancePoolPermissionLevelsRequest struct { + // The instance pool for which to get or manage permissions. + InstancePoolId string `json:"-" url:"-"` +} + +type GetInstancePoolPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []InstancePoolPermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get instance pool permissions +type GetInstancePoolPermissionsRequest struct { + // The instance pool for which to get or manage permissions. + InstancePoolId string `json:"-" url:"-"` +} + +// Get instance pool information +type GetInstancePoolRequest struct { + // The canonical unique identifier for the instance pool. + InstancePoolId string `json:"-" url:"instance_pool_id"` +} + +// Get policy family information +type GetPolicyFamilyRequest struct { + // The family ID about which to retrieve information. + PolicyFamilyId string `json:"-" url:"-"` + // The version number for the family to fetch. Defaults to the latest + // version. + Version int64 `json:"-" url:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPolicyFamilyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPolicyFamilyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetSparkVersionsResponse struct { + // All the available Spark versions. + Versions []SparkVersion `json:"versions,omitempty"` +} + +type GlobalInitScriptCreateRequest struct { + // Specifies whether the script is enabled. The script runs only if enabled. + Enabled bool `json:"enabled,omitempty"` + // The name of the script + Name string `json:"name"` + // The position of a global init script, where 0 represents the first script + // to run, 1 is the second script to run, in ascending order. + // + // If you omit the numeric position for a new global init script, it + // defaults to last position. It will run after all current scripts. Setting + // any value greater than the position of the last script is equivalent to + // the last position. Example: Take three existing scripts with positions 0, + // 1, and 2. Any position of (3) or greater puts the script in the last + // position. If an explicit position value conflicts with an existing script + // value, your request succeeds, but the original script at that position + // and all later scripts have their positions incremented by 1. + Position int `json:"position,omitempty"` + // The Base64-encoded content of the script. + Script string `json:"script"` + + ForceSendFields []string `json:"-"` +} + +func (s *GlobalInitScriptCreateRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GlobalInitScriptCreateRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GlobalInitScriptDetails struct { + // Time when the script was created, represented as a Unix timestamp in + // milliseconds. + CreatedAt int `json:"created_at,omitempty"` + // The username of the user who created the script. + CreatedBy string `json:"created_by,omitempty"` + // Specifies whether the script is enabled. The script runs only if enabled. 
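+ //
+ // Aside (a sketch): building a create request per the position and script
+ // rules above. The script body must be Base64-encoded (encoding/base64
+ // assumed imported), and an explicit first position must be force-sent
+ // because 0 is a zero value; the name is hypothetical:
+ //
+ //    req := GlobalInitScriptCreateRequest{
+ //        Name:            "install-monitor",
+ //        Enabled:         true,
+ //        Position:        0, // run first; omit entirely to append last
+ //        Script:          base64.StdEncoding.EncodeToString([]byte("#!/bin/bash\necho hello")),
+ //        ForceSendFields: []string{"Position"},
+ //    }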
+ Enabled bool `json:"enabled,omitempty"`
+ // The name of the script
+ Name string `json:"name,omitempty"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order.
+ Position int `json:"position,omitempty"`
+ // The global init script ID.
+ ScriptId string `json:"script_id,omitempty"`
+ // Time when the script was updated, represented as a Unix timestamp in
+ // milliseconds.
+ UpdatedAt int `json:"updated_at,omitempty"`
+ // The username of the user who last updated the script
+ UpdatedBy string `json:"updated_by,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GlobalInitScriptDetails) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GlobalInitScriptDetails) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type GlobalInitScriptDetailsWithContent struct {
+ // Time when the script was created, represented as a Unix timestamp in
+ // milliseconds.
+ CreatedAt int `json:"created_at,omitempty"`
+ // The username of the user who created the script.
+ CreatedBy string `json:"created_by,omitempty"`
+ // Specifies whether the script is enabled. The script runs only if enabled.
+ Enabled bool `json:"enabled,omitempty"`
+ // The name of the script
+ Name string `json:"name,omitempty"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order.
+ Position int `json:"position,omitempty"`
+ // The Base64-encoded content of the script.
+ Script string `json:"script,omitempty"`
+ // The global init script ID.
+ ScriptId string `json:"script_id,omitempty"`
+ // Time when the script was updated, represented as a Unix timestamp in
+ // milliseconds.
+ UpdatedAt int `json:"updated_at,omitempty"`
+ // The username of the user who last updated the script
+ UpdatedBy string `json:"updated_by,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GlobalInitScriptDetailsWithContent) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GlobalInitScriptDetailsWithContent) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type GlobalInitScriptUpdateRequest struct {
+ // Specifies whether the script is enabled. The script runs only if enabled.
+ Enabled bool `json:"enabled,omitempty"`
+ // The name of the script
+ Name string `json:"name"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order. To move the script to
+ // run first, set its position to 0.
+ //
+ // To move the script to the end, set its position to any value greater than
+ // or equal to the position of the last script. For example, take three
+ // existing scripts with positions 0, 1, and 2. Any position value of 2 or
+ // greater puts the script in the last position (2).
+ //
+ // If an explicit position value conflicts with an existing script, your
+ // request succeeds, but the original script at that position and all later
+ // scripts have their positions incremented by 1.
+ Position int `json:"position,omitempty"`
+ // The Base64-encoded content of the script.
+ Script string `json:"script"`
+ // The ID of the global init script.
+ ScriptId string `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GlobalInitScriptUpdateRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GlobalInitScriptUpdateRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InitScriptEventDetails struct {
+ // The cluster scoped init scripts associated with this cluster event
+ Cluster []InitScriptInfoAndExecutionDetails `json:"cluster,omitempty"`
+ // The global init scripts associated with this cluster event
+ Global []InitScriptInfoAndExecutionDetails `json:"global,omitempty"`
+ // The private IP address of the node where the init scripts were run.
+ ReportedForNode string `json:"reported_for_node,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InitScriptEventDetails) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InitScriptEventDetails) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InitScriptExecutionDetails struct {
+ // Additional details regarding errors.
+ ErrorMessage string `json:"error_message,omitempty"`
+ // The duration of the script execution in seconds.
+ ExecutionDurationSeconds int `json:"execution_duration_seconds,omitempty"`
+ // The current status of the script
+ Status InitScriptExecutionDetailsStatus `json:"status,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InitScriptExecutionDetails) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InitScriptExecutionDetails) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The current status of the script
+type InitScriptExecutionDetailsStatus string
+
+const InitScriptExecutionDetailsStatusFailedExecution InitScriptExecutionDetailsStatus = `FAILED_EXECUTION`
+
+const InitScriptExecutionDetailsStatusFailedFetch InitScriptExecutionDetailsStatus = `FAILED_FETCH`
+
+const InitScriptExecutionDetailsStatusNotExecuted InitScriptExecutionDetailsStatus = `NOT_EXECUTED`
+
+const InitScriptExecutionDetailsStatusSkipped InitScriptExecutionDetailsStatus = `SKIPPED`
+
+const InitScriptExecutionDetailsStatusSucceeded InitScriptExecutionDetailsStatus = `SUCCEEDED`
+
+const InitScriptExecutionDetailsStatusUnknown InitScriptExecutionDetailsStatus = `UNKNOWN`
+
+// String representation for [fmt.Print]
+func (f *InitScriptExecutionDetailsStatus) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *InitScriptExecutionDetailsStatus) Set(v string) error {
+ switch v {
+ case `FAILED_EXECUTION`, `FAILED_FETCH`, `NOT_EXECUTED`, `SKIPPED`, `SUCCEEDED`, `UNKNOWN`:
+ *f = InitScriptExecutionDetailsStatus(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "FAILED_EXECUTION", "FAILED_FETCH", "NOT_EXECUTED", "SKIPPED", "SUCCEEDED", "UNKNOWN"`, v)
+ }
+}
+
+// Type always returns InitScriptExecutionDetailsStatus to satisfy [pflag.Value] interface
+func (f *InitScriptExecutionDetailsStatus) Type() string {
+ return "InitScriptExecutionDetailsStatus"
+}
+
+type InitScriptInfo struct {
+ // destination needs to be provided. e.g. `{ "abfss" : { "destination" :
+ // "abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>"
+ // } }`
+ Abfss *Adlsgen2Info `json:"abfss,omitempty"`
+ // destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
+ // "dbfs:/home/cluster_log" } }`
+ Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
+ // destination needs to be provided. e.g. `{ "file" : { "destination" :
`{ "file" : { "destination" : + // "file:/my/local/file.sh" } }` + File *LocalFileInfo `json:"file,omitempty"` + // destination needs to be provided. e.g. `{ "gcs": { "destination": + // "gs://my-bucket/file.sh" } }` + Gcs *GcsStorageInfo `json:"gcs,omitempty"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` Cluster iam role is used to access s3, please make sure + // the cluster iam role in `instance_profile_arn` has permission to write + // data to the s3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/my-init.sh" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` + // destination needs to be provided. e.g. `{ "workspace" : { "destination" : + // "/Users/user1@databricks.com/my-init.sh" } }` + Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"` +} + +type InitScriptInfoAndExecutionDetails struct { + // Details about the script + ExecutionDetails *InitScriptExecutionDetails `json:"execution_details,omitempty"` + // The script + Script *InitScriptInfo `json:"script,omitempty"` +} + +type InstallLibraries struct { + // Unique identifier for the cluster on which to install these libraries. + ClusterId string `json:"cluster_id"` + // The libraries to install. + Libraries []Library `json:"libraries"` +} + +type InstallLibrariesResponse struct { +} + +type InstancePoolAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *InstancePoolAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s InstancePoolAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type InstancePoolAccessControlResponse struct { + // All permissions. + AllPermissions []InstancePoolPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *InstancePoolAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s InstancePoolAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type InstancePoolAndStats struct { + // Attributes related to instance pools running on Amazon Web Services. If + // not specified at pool creation, a set of default values will be used. + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to instance pools running on Azure. If not specified + // at pool creation, a set of default values will be used. + AzureAttributes *InstancePoolAzureAttributes `json:"azure_attributes,omitempty"` + // Additional tags for pool resources. 
+ // Databricks will tag all pool resources (e.g., AWS instances and EBS
+ // volumes) with these tags in addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // Tags that are added by Databricks regardless of any `custom_tags`,
+ // including:
+ //
+ // - Vendor: Databricks
+ //
+ // - InstancePoolCreator:
+ //
+ // - InstancePoolName:
+ //
+ // - InstancePoolId:
+ DefaultTags map[string]string `json:"default_tags,omitempty"`
+ // Defines the specification of the disks that will be attached to all spark
+ // containers.
+ DiskSpec *DiskSpec `json:"disk_spec,omitempty"`
+ // Autoscaling Local Storage: when enabled, the instances in this pool will
+ // dynamically acquire additional disk space when their Spark workers are
+ // running low on disk space. In AWS, this feature requires specific AWS
+ // permissions to function correctly - refer to the User Guide for more
+ // details.
+ EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+ // Attributes related to instance pools running on Google Cloud Platform. If
+ // not specified at pool creation, a set of default values will be used.
+ GcpAttributes *InstancePoolGcpAttributes `json:"gcp_attributes,omitempty"`
+ // Automatically terminates the extra instances in the pool cache after they
+ // are inactive for this time in minutes if min_idle_instances requirement
+ // is already met. If not set, the extra pool instances will be
+ // automatically terminated after a default timeout. If specified, the
+ // threshold must be between 0 and 10000 minutes. Users can also set this
+ // value to 0 to instantly remove idle instances from the cache as long as
+ // the min cache size constraint is still satisfied.
+ IdleInstanceAutoterminationMinutes int `json:"idle_instance_autotermination_minutes,omitempty"`
+ // Canonical unique identifier for the pool.
+ InstancePoolId string `json:"instance_pool_id,omitempty"`
+ // Pool name requested by the user. Pool name must be unique. Length must be
+ // between 1 and 100 characters.
+ InstancePoolName string `json:"instance_pool_name,omitempty"`
+ // Maximum number of outstanding instances to keep in the pool, including
+ // both instances used by clusters and idle instances. Clusters that require
+ // further instance provisioning will fail during upsize requests.
+ MaxCapacity int `json:"max_capacity,omitempty"`
+ // Minimum number of idle instances to keep in the instance pool
+ MinIdleInstances int `json:"min_idle_instances,omitempty"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster. For example, the Spark nodes can
+ // be provisioned and optimized for memory or compute intensive workloads. A
+ // list of available node types can be retrieved by using the
+ // :method:clusters/listNodeTypes API call.
+ NodeTypeId string `json:"node_type_id,omitempty"`
+ // Custom Docker Image BYOC
+ PreloadedDockerImages []DockerImage `json:"preloaded_docker_images,omitempty"`
+ // A list containing at most one preloaded Spark image version for the pool.
+ // Pool-backed clusters started with the preloaded Spark version will start
+ // faster. A list of available Spark versions can be retrieved by using the
+ // :method:clusters/sparkVersions API call.
+ PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"`
+ // Current state of the instance pool.
+ State InstancePoolState `json:"state,omitempty"`
+ // Usage statistics about the instance pool.
+ Stats *InstancePoolStats `json:"stats,omitempty"`
+ // Status of failed pending instances in the pool.
+ Status *InstancePoolStatus `json:"status,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolAndStats) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolAndStats) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InstancePoolAwsAttributes struct {
+ // Availability type used for the spot nodes.
+ //
+ // The default value is defined by
+ // InstancePoolConf.instancePoolDefaultAwsAvailability
+ Availability InstancePoolAwsAttributesAvailability `json:"availability,omitempty"`
+ // Calculates the bid price for AWS spot instances, as a percentage of the
+ // corresponding instance type's on-demand price. For example, if this field
+ // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then
+ // the bid price is half of the price of on-demand `r3.xlarge` instances.
+ // Similarly, if this field is set to 200, the bid price is twice the price
+ // of on-demand `r3.xlarge` instances. If not specified, the default value
+ // is 100. When spot instances are requested for this cluster, only spot
+ // instances whose bid price percentage matches this field will be
+ // considered. Note that, for safety, we enforce this field to be no more
+ // than 10000.
+ //
+ // The default value and documentation here should be kept consistent with
+ // CommonConf.defaultSpotBidPricePercent and
+ // CommonConf.maxSpotBidPricePercent.
+ SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"`
+ // Identifier for the availability zone/datacenter in which the cluster
+ // resides. This string will be of the form "us-west-2a". The provided
+ // availability zone must be in the same region as the Databricks
+ // deployment. For example, "us-west-2a" is not a valid zone id if the
+ // Databricks deployment resides in the "us-east-1" region. This is an
+ // optional field at cluster creation, and if not specified, a default zone
+ // will be used. The list of available zones as well as the default value
+ // can be found by using the `List Zones` method.
+ ZoneId string `json:"zone_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolAwsAttributes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolAwsAttributes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
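+
+// Usage sketch (illustrative values only): requesting spot capacity in a
+// fixed zone with a bid capped at half the on-demand price, per the
+// `spot_bid_price_percent` semantics described above.
+//
+//	aws := InstancePoolAwsAttributes{
+//		Availability:        InstancePoolAwsAttributesAvailabilitySpot,
+//		SpotBidPricePercent: 50, // bid at 50% of the on-demand price
+//		ZoneId:              "us-west-2a",
+//	}
+
+// Availability type used for the spot nodes.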
+// +// The default value is defined by +// InstancePoolConf.instancePoolDefaultAwsAvailability +type InstancePoolAwsAttributesAvailability string + +const InstancePoolAwsAttributesAvailabilityOnDemand InstancePoolAwsAttributesAvailability = `ON_DEMAND` + +const InstancePoolAwsAttributesAvailabilitySpot InstancePoolAwsAttributesAvailability = `SPOT` + +// String representation for [fmt.Print] +func (f *InstancePoolAwsAttributesAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstancePoolAwsAttributesAvailability) Set(v string) error { + switch v { + case `ON_DEMAND`, `SPOT`: + *f = InstancePoolAwsAttributesAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT"`, v) + } +} + +// Type always returns InstancePoolAwsAttributesAvailability to satisfy [pflag.Value] interface +func (f *InstancePoolAwsAttributesAvailability) Type() string { + return "InstancePoolAwsAttributesAvailability" +} + +type InstancePoolAzureAttributes struct { + // Shows the Availability type used for the spot nodes. + // + // The default value is defined by + // InstancePoolConf.instancePoolDefaultAzureAvailability + Availability InstancePoolAzureAttributesAvailability `json:"availability,omitempty"` + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidMaxPrice. + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *InstancePoolAzureAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s InstancePoolAzureAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Shows the Availability type used for the spot nodes. +// +// The default value is defined by +// InstancePoolConf.instancePoolDefaultAzureAvailability +type InstancePoolAzureAttributesAvailability string + +const InstancePoolAzureAttributesAvailabilityOnDemandAzure InstancePoolAzureAttributesAvailability = `ON_DEMAND_AZURE` + +const InstancePoolAzureAttributesAvailabilitySpotAzure InstancePoolAzureAttributesAvailability = `SPOT_AZURE` + +// String representation for [fmt.Print] +func (f *InstancePoolAzureAttributesAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstancePoolAzureAttributesAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`: + *f = InstancePoolAzureAttributesAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE"`, v) + } +} + +// Type always returns InstancePoolAzureAttributesAvailability to satisfy [pflag.Value] interface +func (f *InstancePoolAzureAttributesAvailability) Type() string { + return "InstancePoolAzureAttributesAvailability" +} + +type InstancePoolGcpAttributes struct { + // This field determines whether the instance pool will contain preemptible + // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs + // if the former is unavailable. + GcpAvailability GcpAvailability `json:"gcp_availability,omitempty"` + // If provided, each node in the instance pool will have this number of + // local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP + // documentation] for the supported number of local SSDs for each instance + // type. 
+ //
+ // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
+ LocalSsdCount int `json:"local_ssd_count,omitempty"`
+ // Identifier for the availability zone/datacenter in which the cluster
+ // resides. This string will be of the form "us-west1-a". The provided
+ // availability zone must be in the same region as the Databricks workspace.
+ // For example, "us-west1-a" is not a valid zone id if the Databricks
+ // workspace resides in the "us-east1" region. This is an optional field at
+ // instance pool creation, and if not specified, a default zone will be
+ // used.
+ //
+ // This field can be one of the following: - "HA" => High availability,
+ // spread nodes across availability zones for a Databricks deployment region
+ // - A GCP availability zone => Pick one of the available zones for (machine
+ // type + region) from https://cloud.google.com/compute/docs/regions-zones
+ // (e.g. "us-west1-a").
+ //
+ // If empty, Databricks picks an availability zone to schedule the cluster
+ // on.
+ ZoneId string `json:"zone_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolGcpAttributes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolGcpAttributes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InstancePoolPermission struct {
+ Inherited bool `json:"inherited,omitempty"`
+
+ InheritedFromObject []string `json:"inherited_from_object,omitempty"`
+ // Permission level
+ PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolPermission) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolPermission) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Permission level
+type InstancePoolPermissionLevel string
+
+const InstancePoolPermissionLevelCanAttachTo InstancePoolPermissionLevel = `CAN_ATTACH_TO`
+
+const InstancePoolPermissionLevelCanManage InstancePoolPermissionLevel = `CAN_MANAGE`
+
+// String representation for [fmt.Print]
+func (f *InstancePoolPermissionLevel) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *InstancePoolPermissionLevel) Set(v string) error {
+ switch v {
+ case `CAN_ATTACH_TO`, `CAN_MANAGE`:
+ *f = InstancePoolPermissionLevel(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_MANAGE"`, v)
+ }
+}
+
+// Type always returns InstancePoolPermissionLevel to satisfy [pflag.Value] interface
+func (f *InstancePoolPermissionLevel) Type() string {
+ return "InstancePoolPermissionLevel"
+}
+
+type InstancePoolPermissions struct {
+ AccessControlList []InstancePoolAccessControlResponse `json:"access_control_list,omitempty"`
+
+ ObjectId string `json:"object_id,omitempty"`
+
+ ObjectType string `json:"object_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolPermissions) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolPermissions) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
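+
+// Every string enum in this package follows the same [pflag.Value] pattern:
+// Set validates a raw string against the allowed values. A minimal sketch
+// using the permission level above:
+//
+//	var level InstancePoolPermissionLevel
+//	if err := level.Set("CAN_RIDE"); err != nil {
+//		fmt.Println(err) // value "CAN_RIDE" is not one of "CAN_ATTACH_TO", "CAN_MANAGE"
+//	}
+
+type InstancePoolPermissionsDescription struct {
+ Description string `json:"description,omitempty"`
+ // Permission level
+ PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}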
+
+func (s *InstancePoolPermissionsDescription) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolPermissionsDescription) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InstancePoolPermissionsRequest struct {
+ AccessControlList []InstancePoolAccessControlRequest `json:"access_control_list,omitempty"`
+ // The instance pool for which to get or manage permissions.
+ InstancePoolId string `json:"-" url:"-"`
+}
+
+// Current state of the instance pool.
+type InstancePoolState string
+
+const InstancePoolStateActive InstancePoolState = `ACTIVE`
+
+const InstancePoolStateDeleted InstancePoolState = `DELETED`
+
+const InstancePoolStateStopped InstancePoolState = `STOPPED`
+
+// String representation for [fmt.Print]
+func (f *InstancePoolState) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *InstancePoolState) Set(v string) error {
+ switch v {
+ case `ACTIVE`, `DELETED`, `STOPPED`:
+ *f = InstancePoolState(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETED", "STOPPED"`, v)
+ }
+}
+
+// Type always returns InstancePoolState to satisfy [pflag.Value] interface
+func (f *InstancePoolState) Type() string {
+ return "InstancePoolState"
+}
+
+type InstancePoolStats struct {
+ // Number of active instances in the pool that are NOT part of a cluster.
+ IdleCount int `json:"idle_count,omitempty"`
+ // Number of pending instances in the pool that are NOT part of a cluster.
+ PendingIdleCount int `json:"pending_idle_count,omitempty"`
+ // Number of pending instances in the pool that are part of a cluster.
+ PendingUsedCount int `json:"pending_used_count,omitempty"`
+ // Number of active instances in the pool that are part of a cluster.
+ UsedCount int `json:"used_count,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstancePoolStats) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstancePoolStats) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InstancePoolStatus struct {
+ // List of error messages for the failed pending instances. The
+ // pending_instance_errors follows FIFO with a maximum length of the
+ // min_idle of the pool. The pending_instance_errors is emptied once the
+ // number of existing available instances reaches the min_idle of the pool.
+ PendingInstanceErrors []PendingInstanceError `json:"pending_instance_errors,omitempty"`
+}
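+
+// Usage sketch: a pool can serve an attach request immediately when it is
+// active and has idle capacity; `pool` is assumed to be an
+// InstancePoolAndStats value returned by a get or list call.
+//
+//	if pool.State == InstancePoolStateActive && pool.Stats != nil && pool.Stats.IdleCount > 0 {
+//		// instances can be attached without provisioning new VMs
+//	}
+
+type InstanceProfile struct {
+ // The AWS IAM role ARN of the role associated with the instance profile.
+ // This field is required if your role name and instance profile name do not
+ // match and you want to use the instance profile with [Databricks SQL
+ // Serverless].
+ //
+ // Otherwise, this field is optional.
+ //
+ // [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
+ IamRoleArn string `json:"iam_role_arn,omitempty"`
+ // The AWS ARN of the instance profile to register with Databricks. This
+ // field is required.
+ InstanceProfileArn string `json:"instance_profile_arn"`
+ // Boolean flag indicating whether the instance profile should only be used
+ // in credential passthrough scenarios. If true, it means the instance
+ // profile contains a meta IAM role which could assume a wide range of
+ // roles. Therefore it should always be used with authorization. This field
+ // is optional; the default value is `false`.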
+ IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InstanceProfile) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InstanceProfile) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The kind of compute described by this compute specification.
+//
+// Depending on `kind`, different validations and default values will be
+// applied.
+//
+// The first usage of this value is for the simple cluster form where it sets
+// `kind = CLASSIC_PREVIEW`.
+type Kind string
+
+const KindClassicPreview Kind = `CLASSIC_PREVIEW`
+
+// String representation for [fmt.Print]
+func (f *Kind) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Kind) Set(v string) error {
+ switch v {
+ case `CLASSIC_PREVIEW`:
+ *f = Kind(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CLASSIC_PREVIEW"`, v)
+ }
+}
+
+// Type always returns Kind to satisfy [pflag.Value] interface
+func (f *Kind) Type() string {
+ return "Kind"
+}
+
+type Language string
+
+const LanguagePython Language = `python`
+
+const LanguageScala Language = `scala`
+
+const LanguageSql Language = `sql`
+
+// String representation for [fmt.Print]
+func (f *Language) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Language) Set(v string) error {
+ switch v {
+ case `python`, `scala`, `sql`:
+ *f = Language(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "python", "scala", "sql"`, v)
+ }
+}
+
+// Type always returns Language to satisfy [pflag.Value] interface
+func (f *Language) Type() string {
+ return "Language"
+}
+
+type Library struct {
+ // Specification of a CRAN library to be installed as part of the library
+ Cran *RCranLibrary `json:"cran,omitempty"`
+ // Deprecated. URI of the egg library to install. Installing Python egg
+ // files is deprecated and is not supported in Databricks Runtime 14.0 and
+ // above.
+ Egg string `json:"egg,omitempty"`
+ // URI of the JAR library to install. Supported URIs include Workspace
+ // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "jar":
+ // "/Workspace/path/to/library.jar" }`, `{ "jar" :
+ // "/Volumes/path/to/library.jar" }` or `{ "jar":
+ // "s3://my-bucket/library.jar" }`. If S3 is used, please make sure the
+ // cluster has read access to the library. You may need to launch the
+ // cluster with an IAM role to access the S3 URI.
+ Jar string `json:"jar,omitempty"`
+ // Specification of a Maven library to be installed. For example: `{
+ // "coordinates": "org.jsoup:jsoup:1.7.2" }`
+ Maven *MavenLibrary `json:"maven,omitempty"`
+ // Specification of a PyPI library to be installed. For example: `{
+ // "package": "simplejson" }`
+ Pypi *PythonPyPiLibrary `json:"pypi,omitempty"`
+ // URI of the requirements.txt file to install. Only Workspace paths and
+ // Unity Catalog Volumes paths are supported. For example: `{
+ // "requirements": "/Workspace/path/to/requirements.txt" }` or `{
+ // "requirements" : "/Volumes/path/to/requirements.txt" }`
+ Requirements string `json:"requirements,omitempty"`
+ // URI of the wheel library to install. Supported URIs include Workspace
+ // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "whl":
+ // "/Workspace/path/to/library.whl" }`, `{ "whl" :
+ // "/Volumes/path/to/library.whl" }` or `{ "whl":
+ // "s3://my-bucket/library.whl" }`.
+ // If S3 is used, please make sure the cluster has read access to the
+ // library. You may need to launch the cluster with an IAM role to access
+ // the S3 URI.
+ Whl string `json:"whl,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Library) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Library) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The status of the library on a specific cluster.
+type LibraryFullStatus struct {
+ // Whether the library was set to be installed on all clusters via the
+ // libraries UI.
+ IsLibraryForAllClusters bool `json:"is_library_for_all_clusters,omitempty"`
+ // Unique identifier for the library.
+ Library *Library `json:"library,omitempty"`
+ // All the info and warning messages that have occurred so far for this
+ // library.
+ Messages []string `json:"messages,omitempty"`
+ // Status of installing the library on the cluster.
+ Status LibraryInstallStatus `json:"status,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LibraryFullStatus) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LibraryFullStatus) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The status of a library on a specific cluster.
+type LibraryInstallStatus string
+
+const LibraryInstallStatusFailed LibraryInstallStatus = `FAILED`
+
+const LibraryInstallStatusInstalled LibraryInstallStatus = `INSTALLED`
+
+const LibraryInstallStatusInstalling LibraryInstallStatus = `INSTALLING`
+
+const LibraryInstallStatusPending LibraryInstallStatus = `PENDING`
+
+const LibraryInstallStatusResolving LibraryInstallStatus = `RESOLVING`
+
+const LibraryInstallStatusRestored LibraryInstallStatus = `RESTORED`
+
+const LibraryInstallStatusSkipped LibraryInstallStatus = `SKIPPED`
+
+const LibraryInstallStatusUninstallOnRestart LibraryInstallStatus = `UNINSTALL_ON_RESTART`
+
+// String representation for [fmt.Print]
+func (f *LibraryInstallStatus) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *LibraryInstallStatus) Set(v string) error {
+ switch v {
+ case `FAILED`, `INSTALLED`, `INSTALLING`, `PENDING`, `RESOLVING`, `RESTORED`, `SKIPPED`, `UNINSTALL_ON_RESTART`:
+ *f = LibraryInstallStatus(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "FAILED", "INSTALLED", "INSTALLING", "PENDING", "RESOLVING", "RESTORED", "SKIPPED", "UNINSTALL_ON_RESTART"`, v)
+ }
+}
+
+// Type always returns LibraryInstallStatus to satisfy [pflag.Value] interface
+func (f *LibraryInstallStatus) Type() string {
+ return "LibraryInstallStatus"
+}
+
+type ListAllClusterLibraryStatusesResponse struct {
+ // A list of cluster statuses.
+ Statuses []ClusterLibraryStatuses `json:"statuses,omitempty"`
+}
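+
+// Usage sketch: scanning per-library statuses for failures; `libStatuses`
+// is assumed to be a []LibraryFullStatus taken from a cluster-status
+// response.
+//
+//	for _, st := range libStatuses {
+//		if st.Status == LibraryInstallStatusFailed {
+//			fmt.Println(st.Messages) // info and warning messages collected so far
+//		}
+//	}
+
+type ListAvailableZonesResponse struct {
+ // The availability zone if no `zone_id` is provided in the cluster creation
+ // request.
+ DefaultZone string `json:"default_zone,omitempty"`
+ // The list of available zones (e.g., ['us-west-2c', 'us-east-2']).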
+ Zones []string `json:"zones,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListAvailableZonesResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListAvailableZonesResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List cluster policy compliance
+type ListClusterCompliancesRequest struct {
+ // Use this field to specify the maximum number of results to be returned by
+ // the server. The server may further constrain the maximum number of
+ // results returned in a single page.
+ PageSize int `json:"-" url:"page_size,omitempty"`
+ // A page token that can be used to navigate to the next page or previous
+ // page as returned by `next_page_token` or `prev_page_token`.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+ // Canonical unique identifier for the cluster policy.
+ PolicyId string `json:"-" url:"policy_id"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClusterCompliancesRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListClusterCompliancesRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListClusterCompliancesResponse struct {
+ // A list of clusters and their policy compliance statuses.
+ Clusters []ClusterCompliance `json:"clusters,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is "", there are no further results for the
+ // request.
+ NextPageToken string `json:"next_page_token,omitempty"`
+ // This field represents the pagination token to retrieve the previous page
+ // of results. If the value is "", there are no further results for the
+ // request.
+ PrevPageToken string `json:"prev_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClusterCompliancesResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListClusterCompliancesResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List cluster policies
+type ListClusterPoliciesRequest struct {
+ // The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort
+ // result list by policy creation time. * `POLICY_NAME` - Sort result list
+ // by policy name.
+ SortColumn ListSortColumn `json:"-" url:"sort_column,omitempty"`
+ // The order in which the policies get listed. * `DESC` - Sort result list
+ // in descending order. * `ASC` - Sort result list in ascending order.
+ SortOrder ListSortOrder `json:"-" url:"sort_order,omitempty"`
+}
+
+type ListClustersFilterBy struct {
+ // The source of cluster creation.
+ ClusterSources []ClusterSource `json:"cluster_sources,omitempty" url:"cluster_sources,omitempty"`
+ // The current state of the clusters.
+ ClusterStates []State `json:"cluster_states,omitempty" url:"cluster_states,omitempty"`
+ // Whether the clusters are pinned or not.
+ IsPinned bool `json:"is_pinned,omitempty" url:"is_pinned,omitempty"`
+ // The ID of the cluster policy used to create the cluster if applicable.
+ PolicyId string `json:"policy_id,omitempty" url:"policy_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClustersFilterBy) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListClustersFilterBy) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
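+
+// Usage sketch: a filter that matches running or pending pinned clusters;
+// the State values are defined later in this file.
+//
+//	filter := ListClustersFilterBy{
+//		ClusterStates: []State{StateRunning, StatePending},
+//		IsPinned:      true,
+//	}
+
+// List clusters
+type ListClustersRequest struct {
+ // Filters to apply to the list of clusters.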
+ FilterBy *ListClustersFilterBy `json:"-" url:"filter_by,omitempty"`
+ // Use this field to specify the maximum number of results to be returned by
+ // the server. The server may further constrain the maximum number of
+ // results returned in a single page.
+ PageSize int `json:"-" url:"page_size,omitempty"`
+ // Use next_page_token or prev_page_token returned from the previous request
+ // to list the next or previous page of clusters respectively.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+ // Sort the list of clusters by specific criteria.
+ SortBy *ListClustersSortBy `json:"-" url:"sort_by,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClustersRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListClustersRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListClustersResponse struct {
+ Clusters []ClusterDetails `json:"clusters,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is "", there are no further results for the
+ // request.
+ NextPageToken string `json:"next_page_token,omitempty"`
+ // This field represents the pagination token to retrieve the previous page
+ // of results. If the value is "", there are no further results for the
+ // request.
+ PrevPageToken string `json:"prev_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListClustersResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListClustersResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
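+
+// Usage sketch of the pagination contract described above: keep following
+// next_page_token until it comes back empty. `list` stands in for the
+// Clusters list call and `all` collects the results.
+//
+//	req := ListClustersRequest{PageSize: 100}
+//	var all []ClusterDetails
+//	for {
+//		resp := list(req)
+//		all = append(all, resp.Clusters...)
+//		if resp.NextPageToken == "" {
+//			break // an empty token means no further results
+//		}
+//		req.PageToken = resp.NextPageToken
+//	}
+
+type ListClustersSortBy struct {
+ // The direction to sort by.
+ Direction ListClustersSortByDirection `json:"direction,omitempty" url:"direction,omitempty"`
+ // The sorting criteria. By default, clusters are sorted by 3 columns from
+ // highest to lowest precedence: cluster state, pinned or unpinned, then
+ // cluster name.
+ Field ListClustersSortByField `json:"field,omitempty" url:"field,omitempty"`
+}
+
+// The direction to sort by.
+type ListClustersSortByDirection string
+
+const ListClustersSortByDirectionAsc ListClustersSortByDirection = `ASC`
+
+const ListClustersSortByDirectionDesc ListClustersSortByDirection = `DESC`
+
+// String representation for [fmt.Print]
+func (f *ListClustersSortByDirection) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ListClustersSortByDirection) Set(v string) error {
+ switch v {
+ case `ASC`, `DESC`:
+ *f = ListClustersSortByDirection(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ASC", "DESC"`, v)
+ }
+}
+
+// Type always returns ListClustersSortByDirection to satisfy [pflag.Value] interface
+func (f *ListClustersSortByDirection) Type() string {
+ return "ListClustersSortByDirection"
+}
+
+// The sorting criteria. By default, clusters are sorted by 3 columns from
+// highest to lowest precedence: cluster state, pinned or unpinned, then cluster
+// name.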
+type ListClustersSortByField string + +const ListClustersSortByFieldClusterName ListClustersSortByField = `CLUSTER_NAME` + +const ListClustersSortByFieldDefault ListClustersSortByField = `DEFAULT` + +// String representation for [fmt.Print] +func (f *ListClustersSortByField) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListClustersSortByField) Set(v string) error { + switch v { + case `CLUSTER_NAME`, `DEFAULT`: + *f = ListClustersSortByField(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLUSTER_NAME", "DEFAULT"`, v) + } +} + +// Type always returns ListClustersSortByField to satisfy [pflag.Value] interface +func (f *ListClustersSortByField) Type() string { + return "ListClustersSortByField" +} + +type ListGlobalInitScriptsResponse struct { + Scripts []GlobalInitScriptDetails `json:"scripts,omitempty"` +} + +type ListInstancePools struct { + InstancePools []InstancePoolAndStats `json:"instance_pools,omitempty"` +} + +type ListInstanceProfilesResponse struct { + // A list of instance profiles that the user can access. + InstanceProfiles []InstanceProfile `json:"instance_profiles,omitempty"` +} + +type ListNodeTypesResponse struct { + // The list of available Spark node types. + NodeTypes []NodeType `json:"node_types,omitempty"` +} + +type ListPoliciesResponse struct { + // List of policies. + Policies []Policy `json:"policies,omitempty"` +} + +// List policy families +type ListPolicyFamiliesRequest struct { + // Maximum number of policy families to return. + MaxResults int64 `json:"-" url:"max_results,omitempty"` + // A token that can be used to get the next page of results. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPolicyFamiliesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPolicyFamiliesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListPolicyFamiliesResponse struct { + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + // List of policy families. + PolicyFamilies []PolicyFamily `json:"policy_families,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPolicyFamiliesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPolicyFamiliesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListSortColumn string + +const ListSortColumnPolicyCreationTime ListSortColumn = `POLICY_CREATION_TIME` + +const ListSortColumnPolicyName ListSortColumn = `POLICY_NAME` + +// String representation for [fmt.Print] +func (f *ListSortColumn) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListSortColumn) Set(v string) error { + switch v { + case `POLICY_CREATION_TIME`, `POLICY_NAME`: + *f = ListSortColumn(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "POLICY_CREATION_TIME", "POLICY_NAME"`, v) + } +} + +// Type always returns ListSortColumn to satisfy [pflag.Value] interface +func (f *ListSortColumn) Type() string { + return "ListSortColumn" +} + +// A generic ordering enum for list-based queries. 
+type ListSortOrder string
+
+const ListSortOrderAsc ListSortOrder = `ASC`
+
+const ListSortOrderDesc ListSortOrder = `DESC`
+
+// String representation for [fmt.Print]
+func (f *ListSortOrder) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ListSortOrder) Set(v string) error {
+ switch v {
+ case `ASC`, `DESC`:
+ *f = ListSortOrder(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ASC", "DESC"`, v)
+ }
+}
+
+// Type always returns ListSortOrder to satisfy [pflag.Value] interface
+func (f *ListSortOrder) Type() string {
+ return "ListSortOrder"
+}
+
+type LocalFileInfo struct {
+ // local file destination, e.g. `file:/my/local/file.sh`
+ Destination string `json:"destination"`
+}
+
+type LogAnalyticsInfo struct {
+ LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"`
+
+ LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogSyncStatus struct {
+ // The timestamp of the last attempt. If the last attempt fails,
+ // `last_exception` will contain the exception from the last attempt.
+ LastAttempted int64 `json:"last_attempted,omitempty"`
+ // The exception thrown in the last attempt; it is null (omitted in the
+ // response) if there was no exception in the last attempt.
+ LastException string `json:"last_exception,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogSyncStatus) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogSyncStatus) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type MavenLibrary struct {
+ // Gradle-style Maven coordinates. For example: "org.jsoup:jsoup:1.7.2".
+ Coordinates string `json:"coordinates"`
+ // List of dependencies to exclude. For example: `["slf4j:slf4j",
+ // "*:hadoop-client"]`.
+ //
+ // Maven dependency exclusions:
+ // https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.
+ Exclusions []string `json:"exclusions,omitempty"`
+ // Maven repo to install the Maven package from. If omitted, both Maven
+ // Central Repository and Spark Packages are searched.
+ Repo string `json:"repo,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *MavenLibrary) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s MavenLibrary) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
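+
+// Usage sketch: a Maven library with dependency exclusions; the coordinates
+// and exclusions are the examples from the field docs above.
+//
+//	lib := Library{
+//		Maven: &MavenLibrary{
+//			Coordinates: "org.jsoup:jsoup:1.7.2",
+//			Exclusions:  []string{"slf4j:slf4j", "*:hadoop-client"},
+//		},
+//	}
+
+type NodeInstanceType struct {
+ InstanceTypeId string `json:"instance_type_id,omitempty"`
+
+ LocalDiskSizeGb int `json:"local_disk_size_gb,omitempty"`
+
+ LocalDisks int `json:"local_disks,omitempty"`
+
+ LocalNvmeDiskSizeGb int `json:"local_nvme_disk_size_gb,omitempty"`
+
+ LocalNvmeDisks int `json:"local_nvme_disks,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NodeInstanceType) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s NodeInstanceType) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type NodeType struct {
+ Category string `json:"category,omitempty"`
+ // A string description associated with this node type, e.g., "r3.xlarge".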
+ Description string `json:"description"`
+
+ DisplayOrder int `json:"display_order,omitempty"`
+ // An identifier for the type of hardware that this node runs on, e.g.,
+ // "r3.2xlarge" in AWS.
+ InstanceTypeId string `json:"instance_type_id"`
+ // Whether the node type is deprecated. Non-deprecated node types offer
+ // greater performance.
+ IsDeprecated bool `json:"is_deprecated,omitempty"`
+ // AWS specific, whether this instance supports encryption in transit, used
+ // for HIPAA and PCI workloads.
+ IsEncryptedInTransit bool `json:"is_encrypted_in_transit,omitempty"`
+
+ IsGraviton bool `json:"is_graviton,omitempty"`
+
+ IsHidden bool `json:"is_hidden,omitempty"`
+
+ IsIoCacheEnabled bool `json:"is_io_cache_enabled,omitempty"`
+ // Memory (in MB) available for this node type.
+ MemoryMb int `json:"memory_mb"`
+
+ NodeInfo *CloudProviderNodeInfo `json:"node_info,omitempty"`
+
+ NodeInstanceType *NodeInstanceType `json:"node_instance_type,omitempty"`
+ // Unique identifier for this node type.
+ NodeTypeId string `json:"node_type_id"`
+ // Number of CPU cores available for this node type. Note that this can be
+ // fractional, e.g., 2.5 cores, if the number of cores on a machine instance
+ // is not divisible by the number of Spark nodes on that machine.
+ NumCores float64 `json:"num_cores"`
+
+ NumGpus int `json:"num_gpus,omitempty"`
+
+ PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"`
+
+ PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"`
+
+ SupportClusterTags bool `json:"support_cluster_tags,omitempty"`
+
+ SupportEbsVolumes bool `json:"support_ebs_volumes,omitempty"`
+
+ SupportPortForwarding bool `json:"support_port_forwarding,omitempty"`
+ // Indicates if this node type can be used for an instance pool or cluster
+ // with elastic disk enabled. This is true for most node types.
+ SupportsElasticDisk bool `json:"supports_elastic_disk,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NodeType) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s NodeType) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
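+
+// Usage sketch: choosing the smallest non-deprecated, Photon-capable node
+// type with at least 32 GB of memory; `nodeTypes` is assumed to come from
+// ListNodeTypesResponse.NodeTypes.
+//
+//	var pick *NodeType
+//	for i, nt := range nodeTypes {
+//		if nt.IsDeprecated || nt.MemoryMb < 32768 || !nt.PhotonWorkerCapable {
+//			continue
+//		}
+//		if pick == nil || nt.MemoryMb < pick.MemoryMb {
+//			pick = &nodeTypes[i]
+//		}
+//	}
+
+type PendingInstanceError struct {
+ InstanceId string `json:"instance_id,omitempty"`
+
+ Message string `json:"message,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PendingInstanceError) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PendingInstanceError) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PermanentDeleteCluster struct {
+ // The cluster to be deleted.
+ ClusterId string `json:"cluster_id"`
+}
+
+type PermanentDeleteClusterResponse struct {
+}
+
+type PinCluster struct {
+ ClusterId string `json:"cluster_id"`
+}
+
+type PinClusterResponse struct {
+}
+
+// Describes a Cluster Policy entity.
+type Policy struct {
+ // Creation time. The timestamp (in milliseconds) when this Cluster Policy
+ // was created.
+ CreatedAtTimestamp int64 `json:"created_at_timestamp,omitempty"`
+ // Creator user name. The field won't be included in the response if the
+ // user has already been deleted.
+ CreatorUserName string `json:"creator_user_name,omitempty"`
+ // Policy definition document expressed in [Databricks Cluster Policy
+ // Definition Language].
+ //
+ // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
+ Definition string `json:"definition,omitempty"`
+ // Additional human-readable description of the cluster policy.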
+ Description string `json:"description,omitempty"`
+ // If true, the policy is a default policy created and managed by
+ // Databricks. Default policies cannot be deleted, and their policy families
+ // cannot be changed.
+ IsDefault bool `json:"is_default,omitempty"`
+ // A list of libraries to be installed on the next cluster restart that uses
+ // this policy. The maximum number of libraries is 500.
+ Libraries []Library `json:"libraries,omitempty"`
+ // Max number of clusters per user that can be active using this policy. If
+ // not present, there is no max limit.
+ MaxClustersPerUser int64 `json:"max_clusters_per_user,omitempty"`
+ // Cluster Policy name requested by the user. This has to be unique. Length
+ // must be between 1 and 100 characters.
+ Name string `json:"name,omitempty"`
+ // Policy definition JSON document expressed in [Databricks Policy
+ // Definition Language]. The JSON document must be passed as a string and
+ // cannot be embedded in the requests.
+ //
+ // You can use this to customize the policy definition inherited from the
+ // policy family. Policy rules specified here are merged into the inherited
+ // policy definition.
+ //
+ // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
+ PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
+ // ID of the policy family. The cluster policy's policy definition inherits
+ // the policy family's policy definition.
+ //
+ // Cannot be used with `definition`. Use
+ // `policy_family_definition_overrides` instead to customize the policy
+ // definition.
+ PolicyFamilyId string `json:"policy_family_id,omitempty"`
+ // Canonical unique identifier for the Cluster Policy.
+ PolicyId string `json:"policy_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Policy) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Policy) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
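+
+// Usage sketch: a policy that inherits from a policy family and overrides a
+// single rule. The family ID is a placeholder, and the override string is an
+// illustrative snippet of the policy-definition DSL referenced above.
+//
+//	policy := Policy{
+//		Name:           "team-policy",
+//		PolicyFamilyId: "some-family-id",
+//		PolicyFamilyDefinitionOverrides: `{
+//		  "autotermination_minutes": {"type": "fixed", "value": 60}
+//		}`,
+//	}
+
+type PolicyFamily struct {
+ // Policy definition document expressed in [Databricks Cluster Policy
+ // Definition Language].
+ //
+ // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html
+ Definition string `json:"definition,omitempty"`
+ // Human-readable description of the purpose of the policy family.
+ Description string `json:"description,omitempty"`
+ // Name of the policy family.
+ Name string `json:"name,omitempty"`
+ // Unique identifier for the policy family.
+ PolicyFamilyId string `json:"policy_family_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PolicyFamily) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PolicyFamily) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PythonPyPiLibrary struct {
+ // The name of the PyPI package to install. An optional exact version
+ // specification is also supported. Examples: "simplejson" and
+ // "simplejson==3.8.0".
+ Package string `json:"package"`
+ // The repository where the package can be found. If not specified, the
+ // default pip index is used.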
+ Repo string `json:"repo,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PythonPyPiLibrary) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PythonPyPiLibrary) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RCranLibrary struct {
+ // The name of the CRAN package to install.
+ Package string `json:"package"`
+ // The repository where the package can be found. If not specified, the
+ // default CRAN repo is used.
+ Repo string `json:"repo,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RCranLibrary) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RCranLibrary) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RemoveInstanceProfile struct {
+ // The ARN of the instance profile to remove. This field is required.
+ InstanceProfileArn string `json:"instance_profile_arn"`
+}
+
+type RemoveResponse struct {
+}
+
+type ResizeCluster struct {
+ // Parameters needed in order to automatically scale clusters up and down
+ // based on load. Note: autoscaling works best with DB runtime versions 3.0
+ // or later.
+ Autoscale *AutoScale `json:"autoscale,omitempty"`
+ // The cluster to be resized.
+ ClusterId string `json:"cluster_id"`
+ // Number of worker nodes that this cluster should have. A cluster has one
+ // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
+ // Spark nodes.
+ //
+ // Note: When reading the properties of a cluster, this field reflects the
+ // desired number of workers rather than the actual current number of
+ // workers. For instance, if a cluster is resized from 5 to 10 workers, this
+ // field will immediately be updated to reflect the target size of 10
+ // workers, whereas the workers listed in `spark_info` will gradually
+ // increase from 5 to 10 as the new nodes are provisioned.
+ NumWorkers int `json:"num_workers,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ResizeCluster) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ResizeCluster) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ResizeClusterResponse struct {
+}
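+
+// Usage sketch of the 5-to-10 resize described above; the cluster ID is a
+// placeholder.
+//
+//	resize := ResizeCluster{
+//		ClusterId:  "1234-567890-abcde123",
+//		NumWorkers: 10, // target size; spark_info catches up as nodes provision
+//	}
+
+type RestartCluster struct {
+ // The cluster to be restarted.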
+ ClusterId string `json:"cluster_id"`
+
+ RestartUser string `json:"restart_user,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RestartCluster) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RestartCluster) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RestartClusterResponse struct {
+}
+
+type ResultType string
+
+const ResultTypeError ResultType = `error`
+
+const ResultTypeImage ResultType = `image`
+
+const ResultTypeImages ResultType = `images`
+
+const ResultTypeTable ResultType = `table`
+
+const ResultTypeText ResultType = `text`
+
+// String representation for [fmt.Print]
+func (f *ResultType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ResultType) Set(v string) error {
+ switch v {
+ case `error`, `image`, `images`, `table`, `text`:
+ *f = ResultType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "error", "image", "images", "table", "text"`, v)
+ }
+}
+
+// Type always returns ResultType to satisfy [pflag.Value] interface
+func (f *ResultType) Type() string {
+ return "ResultType"
+}
+
+type Results struct {
+ // The cause of the error
+ Cause string `json:"cause,omitempty"`
+
+ Data any `json:"data,omitempty"`
+ // The image filename
+ FileName string `json:"fileName,omitempty"`
+
+ FileNames []string `json:"fileNames,omitempty"`
+ // true if a JSON schema is returned instead of a string representation of
+ // the Hive type.
+ IsJsonSchema bool `json:"isJsonSchema,omitempty"`
+ // internal field used by SDK
+ Pos int `json:"pos,omitempty"`
+
+ ResultType ResultType `json:"resultType,omitempty"`
+ // The table schema
+ Schema []map[string]any `json:"schema,omitempty"`
+ // The summary of the error
+ Summary string `json:"summary,omitempty"`
+ // true if partial results are returned.
+ Truncated bool `json:"truncated,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Results) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Results) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Determines the cluster's runtime engine, either standard or Photon.
+//
+// This field is not compatible with legacy `spark_version` values that contain
+// `-photon-`. Remove `-photon-` from the `spark_version` and set
+// `runtime_engine` to `PHOTON`.
+//
+// If left unspecified, the runtime engine defaults to standard unless the
+// spark_version contains -photon-, in which case Photon will be used.
+type RuntimeEngine string
+
+const RuntimeEngineNull RuntimeEngine = `NULL`
+
+const RuntimeEnginePhoton RuntimeEngine = `PHOTON`
+
+const RuntimeEngineStandard RuntimeEngine = `STANDARD`
+
+// String representation for [fmt.Print]
+func (f *RuntimeEngine) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RuntimeEngine) Set(v string) error {
+ switch v {
+ case `NULL`, `PHOTON`, `STANDARD`:
+ *f = RuntimeEngine(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "NULL", "PHOTON", "STANDARD"`, v)
+ }
+}
+
+// Type always returns RuntimeEngine to satisfy [pflag.Value] interface
+func (f *RuntimeEngine) Type() string {
+ return "RuntimeEngine"
+}
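+
+// Usage sketch of the migration described above: drop `-photon-` from the
+// Spark version and request Photon explicitly. `spec` stands in for any
+// cluster spec in this package with SparkVersion and RuntimeEngine fields;
+// the version strings are illustrative.
+//
+//	// instead of spec.SparkVersion = "13.3.x-photon-scala2.12":
+//	spec.SparkVersion = "13.3.x-scala2.12"
+//	spec.RuntimeEngine = RuntimeEnginePhoton
+
+type S3StorageInfo struct {
+ // (Optional) Set canned access control list for the logs, e.g.
+ // `bucket-owner-full-control`.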
+ // If `canned_acl` is set, please make sure the cluster IAM role has
+ // `s3:PutObjectAcl` permission on the destination bucket and prefix. The
+ // full list of possible canned ACLs can be found at
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
+ // Note that by default only the object owner gets full control. If you are
+ // using a cross-account role for writing data, you may want to set
+ // `bucket-owner-full-control` so that the bucket owner is able to read the
+ // logs.
+ CannedAcl string `json:"canned_acl,omitempty"`
+ // S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs will be
+ // delivered using the cluster IAM role; please make sure you set the
+ // cluster IAM role and that the role has write access to the destination.
+ // Note also that you cannot use AWS keys to deliver logs.
+ Destination string `json:"destination"`
+ // (Optional) Flag to enable server side encryption, `false` by default.
+ EnableEncryption bool `json:"enable_encryption,omitempty"`
+ // (Optional) The encryption type, either `sse-s3` or `sse-kms`. It is used
+ // only when encryption is enabled, and the default type is `sse-s3`.
+ EncryptionType string `json:"encryption_type,omitempty"`
+ // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or
+ // endpoint needs to be set. If both are set, endpoint will be used.
+ Endpoint string `json:"endpoint,omitempty"`
+ // (Optional) KMS key which will be used if encryption is enabled and the
+ // encryption type is set to `sse-kms`.
+ KmsKey string `json:"kms_key,omitempty"`
+ // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set.
+ // If both are set, endpoint will be used.
+ Region string `json:"region,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *S3StorageInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s S3StorageInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
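+
+// Usage sketch: cluster log delivery to S3 with KMS-based server-side
+// encryption; the bucket and key alias are placeholders.
+//
+//	logs := S3StorageInfo{
+//		Destination:      "s3://cluster_log_bucket/prefix",
+//		Region:           "us-west-2",
+//		EnableEncryption: true,
+//		EncryptionType:   "sse-kms",
+//		KmsKey:           "alias/my-log-key",
+//	}
+
+type SparkNode struct {
+ // The private IP address of the host instance.
+ HostPrivateIp string `json:"host_private_ip,omitempty"`
+ // Globally unique identifier for the host instance from the cloud provider.
+ InstanceId string `json:"instance_id,omitempty"`
+ // Attributes specific to AWS for a Spark node.
+ NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"`
+ // Globally unique identifier for this node.
+ NodeId string `json:"node_id,omitempty"`
+ // Private IP address (typically a 10.x.x.x address) of the Spark node. Note
+ // that this is different from the private IP address of the host instance.
+ PrivateIp string `json:"private_ip,omitempty"`
+ // Public DNS address of this node. This address can be used to access the
+ // Spark JDBC server on the driver node. To communicate with the JDBC
+ // server, traffic must be manually authorized by adding security group
+ // rules to the "worker-unmanaged" security group via the AWS console.
+ //
+ // Actually it's the public DNS address of the host instance.
+ PublicDns string `json:"public_dns,omitempty"`
+ // The timestamp (in milliseconds) when the Spark node is launched.
+ //
+ // The start_timestamp is set right before the container is launched: it is
+ // the timestamp when the container is placed on the ResourceManager, before
+ // its launch and setup by the NodeDaemon. This timestamp is the same as the
+ // creation timestamp in the database.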
+ StartTimestamp int64 `json:"start_timestamp,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SparkNode) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SparkNode) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SparkNodeAwsAttributes struct { + // Whether this node is on an Amazon spot instance. + IsSpot bool `json:"is_spot,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SparkNodeAwsAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SparkNodeAwsAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SparkVersion struct { + // Spark version key, for example "2.1.x-scala2.11". This is the value which + // should be provided as the "spark_version" when creating a new cluster. + // Note that the exact Spark version may change over time for a "wildcard" + // version (i.e., "2.1.x-scala2.11" is a "wildcard" version) with minor bug + // fixes. + Key string `json:"key,omitempty"` + // A descriptive name for this Spark version, for example "Spark 2.1". + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SparkVersion) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SparkVersion) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StartCluster struct { + // The cluster to be started. + ClusterId string `json:"cluster_id"` +} + +type StartClusterResponse struct { +} + +// Current state of the cluster. +type State string + +const StateError State = `ERROR` + +const StatePending State = `PENDING` + +const StateResizing State = `RESIZING` + +const StateRestarting State = `RESTARTING` + +const StateRunning State = `RUNNING` + +const StateTerminated State = `TERMINATED` + +const StateTerminating State = `TERMINATING` + +const StateUnknown State = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *State) Set(v string) error { + switch v { + case `ERROR`, `PENDING`, `RESIZING`, `RESTARTING`, `RUNNING`, `TERMINATED`, `TERMINATING`, `UNKNOWN`: + *f = State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ERROR", "PENDING", "RESIZING", "RESTARTING", "RUNNING", "TERMINATED", "TERMINATING", "UNKNOWN"`, v) + } +} + +// Type always returns State to satisfy [pflag.Value] interface +func (f *State) Type() string { + return "State" +} + +type TerminationReason struct { + // status code indicating why the cluster was terminated + Code TerminationReasonCode `json:"code,omitempty"` + // list of parameters that provide additional information about why the + // cluster was terminated + Parameters map[string]string `json:"parameters,omitempty"` + // type of the termination + Type TerminationReasonType `json:"type,omitempty"` +} + +// status code indicating why the cluster was terminated +type TerminationReasonCode string + +const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED` + +const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE` + +const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE` + +const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE` + +const 
TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE` + +const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE` + +const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED` + +const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE` + +const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE` + +const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE` + +const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE` + +const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION` + +const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION` + +const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING` + +const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING` + +const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE` + +const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE` + +const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE` + +const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT` + +const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION` + +const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE` + +const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE` + +const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT` + +const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN` + +const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST` + +const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE` + +const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE` + +const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE` + +const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` + +const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE` + +const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE` + +const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED` + +const TerminationReasonCodeGlobalInitScriptFailure 
TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE` + +const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED` + +const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY` + +const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE` + +const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE` + +const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR` + +const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT` + +const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE` + +const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE` + +const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED` + +const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE` + +const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT` + +const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT` + +const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` + +const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` + +const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE` + +const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE` + +const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED` + +const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED` + +const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` + +const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` + +const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` + +const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES` + +const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD` + +const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR` + +const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE` + +const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION` + +const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE` + +const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE` + +const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE` + +const TerminationReasonCodeTrialExpired TerminationReasonCode = 
`TRIAL_EXPIRED` + +const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE` + +const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN` + +const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE` + +const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE` + +const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST` + +const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE` + +const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR` + +const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR` + +// String representation for [fmt.Print] +func (f *TerminationReasonCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonCode) Set(v string) error { + switch v { + case `ABUSE_DETECTED`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_SHUTDOWN`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `DATABASE_CONNECTION_FAILURE`, `DBFS_COMPONENT_UNHEALTHY`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `EXECUTION_COMPONENT_UNHEALTHY`, `GCP_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_UNREACHABLE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_SPARK_IMAGE`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `STORAGE_DOWNLOAD_FAILURE`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`: + *f = TerminationReasonCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", 
"AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_SHUTDOWN", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "DATABASE_CONNECTION_FAILURE", "DBFS_COMPONENT_UNHEALTHY", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "EXECUTION_COMPONENT_UNHEALTHY", "GCP_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_DELETED", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_UNREACHABLE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_SPARK_IMAGE", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "STORAGE_DOWNLOAD_FAILURE", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR"`, v) + } +} + +// Type always returns TerminationReasonCode to satisfy [pflag.Value] interface +func (f *TerminationReasonCode) Type() string { + return "TerminationReasonCode" +} + +// type of the termination +type TerminationReasonType string + +const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR` + +const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE` + +const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT` + +const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationReasonType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `SERVICE_FAULT`, `SUCCESS`: + *f = TerminationReasonType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "SERVICE_FAULT", "SUCCESS"`, v) + } +} + +// Type always returns TerminationReasonType to satisfy [pflag.Value] interface +func (f *TerminationReasonType) Type() string { + return "TerminationReasonType" +} + +type UninstallLibraries struct { + // Unique identifier for the cluster on which to uninstall these 
libraries. + ClusterId string `json:"cluster_id"` + // The libraries to uninstall. + Libraries []Library `json:"libraries"` +} + +type UninstallLibrariesResponse struct { +} + +type UnpinCluster struct { + // + ClusterId string `json:"cluster_id"` +} + +type UnpinClusterResponse struct { +} + +type UpdateCluster struct { + // The cluster to be updated. + Cluster *UpdateClusterResource `json:"cluster,omitempty"` + // ID of the cluster. + ClusterId string `json:"cluster_id"` + // Specifies which fields of the cluster will be updated. This is required + // in the POST request. The update mask should be supplied as a single + // string. To specify multiple fields, separate them with commas (no + // spaces). To delete a field from a cluster configuration, add it to the + // `update_mask` string but omit it from the `cluster` object. + UpdateMask string `json:"update_mask"` +} + +type UpdateClusterResource struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // The following modes can only be used with `kind`. * + // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate + // access mode depending on your compute configuration. * + // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. 
* + // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + // + // The following modes can be used regardless of `kind`. * `NONE`: No + // security isolation for multiple users sharing the cluster. Data + // governance features are not available in this mode. * `SINGLE_USER`: A + // secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC or passthrough enabled. + DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` + + DockerImage *DockerImage `json:"docker_image,omitempty"` + // The optional ID of the instance pool to which the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers` + IsSingleNode bool `json:"is_single_node,omitempty"` + // The kind of compute described by this compute specification.
+ // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. 
The corresponding private keys can be used to log in with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateClusterResource) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateClusterResource) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateClusterResponse struct { +} + +type UpdateResponse struct { +} + +type VolumesStorageInfo struct { + // Unity Catalog volumes file destination, e.g. + // `/Volumes/catalog/schema/volume/dir/file` + Destination string `json:"destination"` +} + +type WorkloadType struct { + // Defines what type of clients can use the cluster, e.g. Notebooks, Jobs. + Clients ClientsTypes `json:"clients"` +} + +type WorkspaceStorageInfo struct { + // Workspace files destination, e.g. + // `/Users/user1@databricks.com/my-init.sh` + Destination string `json:"destination"` +} diff --git a/dashboards/v2preview/api.go b/dashboards/v2preview/api.go new file mode 100755 index 000000000..04627c7d3 --- /dev/null +++ b/dashboards/v2preview/api.go @@ -0,0 +1,412 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Genie Preview, Lakeview Embedded Preview, Lakeview Preview, Query Execution Preview, etc. +package dashboardspreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type GeniePreviewInterface interface { + + // Create conversation message. + // + // Create a new message in [conversation](:method:genie/startconversation). The AI + // response uses all previously created messages in the conversation to respond. + CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) + + // Execute SQL query in a conversation message. + // + // Execute the SQL query in the message. + ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) + + // Get conversation message. + // + // Get message from conversation. + GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) + + // Get conversation message. + // + // Get message from conversation. + GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) + + // Get conversation message SQL query result. + // + // Get the result of the SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY`. + GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) + + // Get conversation message SQL query result. + // + // Get the result of the SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY`.
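+ //
+ // A minimal end-to-end sketch (an illustrative addition, not generated
+ // code): it assumes a client value `genie` built with NewGeniePreviewClient
+ // and the request/response field names defined in model.go later in this
+ // patch; all IDs are placeholders.
+ //
+ //	res, err := genie.StartConversation(ctx, GenieStartConversationMessageRequest{
+ //		SpaceId: "<space-id>",
+ //		Content: "What were last week's sales?",
+ //	})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	// While the message has a query attachment in status EXECUTING_QUERY,
+ //	// the result can be fetched:
+ //	result, err := genie.GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(
+ //		ctx, "<space-id>", res.ConversationId, res.MessageId)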
+ GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) + + // Get conversation message SQL query result by attachment id. + // + // Get the result of the SQL query by attachment id. This is only available if a + // message has a query attachment and the message status is `EXECUTING_QUERY`. + GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) + + // Get conversation message SQL query result by attachment id. + // + // Get the result of the SQL query by attachment id. This is only available if a + // message has a query attachment and the message status is `EXECUTING_QUERY`. + GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) + + // Start conversation. + // + // Start a new conversation. + StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) +} + +func NewGeniePreview(client *client.DatabricksClient) *GeniePreviewAPI { + return &GeniePreviewAPI{ + geniePreviewImpl: geniePreviewImpl{ + client: client, + }, + } +} + +// Genie provides a no-code experience for business users, powered by AI/BI. +// Analysts set up spaces that business users can use to ask questions using +// natural language. Genie uses data registered to Unity Catalog and requires at +// least CAN USE permission on a Pro or Serverless SQL warehouse. Also, +// Databricks Assistant must be enabled. +type GeniePreviewAPI struct { + geniePreviewImpl +} + +// Get conversation message. +// +// Get message from conversation. +func (a *GeniePreviewAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) { + return a.geniePreviewImpl.GetMessage(ctx, GenieGetConversationMessageRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + }) +} + +// Get conversation message SQL query result. +// +// Get the result of the SQL query if the message has a query attachment. This is +// only available if a message has a query attachment and the message status is +// `EXECUTING_QUERY`. +func (a *GeniePreviewAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) { + return a.geniePreviewImpl.GetMessageQueryResult(ctx, GenieGetMessageQueryResultRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + }) +} + +// Get conversation message SQL query result by attachment id. +// +// Get the result of the SQL query by attachment id. This is only available if a +// message has a query attachment and the message status is `EXECUTING_QUERY`.
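+//
+// For example (an illustrative addition, not generated code; IDs are
+// placeholders):
+//
+//	result, err := genie.GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(
+//		ctx, "<space-id>", "<conversation-id>", "<message-id>", "<attachment-id>")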
+func (a *GeniePreviewAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { + return a.geniePreviewImpl.GetMessageQueryResultByAttachment(ctx, GenieGetQueryResultByAttachmentRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + AttachmentId: attachmentId, + }) +} + +type LakeviewEmbeddedPreviewInterface interface { + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error +} + +func NewLakeviewEmbeddedPreview(client *client.DatabricksClient) *LakeviewEmbeddedPreviewAPI { + return &LakeviewEmbeddedPreviewAPI{ + lakeviewEmbeddedPreviewImpl: lakeviewEmbeddedPreviewImpl{ + client: client, + }, + } +} + +// Token-based Lakeview APIs for embedding dashboards in external applications. +type LakeviewEmbeddedPreviewAPI struct { + lakeviewEmbeddedPreviewImpl +} + +// Read a published dashboard in an embedded ui. +// +// Get the current published dashboard within an embedded context. +func (a *LakeviewEmbeddedPreviewAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewEmbeddedPreviewImpl.GetPublishedDashboardEmbedded(ctx, GetPublishedDashboardEmbeddedRequest{ + DashboardId: dashboardId, + }) +} + +type LakeviewPreviewInterface interface { + + // Create dashboard. + // + // Create a draft dashboard. + Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error) + + // Create dashboard schedule. + CreateSchedule(ctx context.Context, request CreateScheduleRequest) (*Schedule, error) + + // Create schedule subscription. + CreateSubscription(ctx context.Context, request CreateSubscriptionRequest) (*Subscription, error) + + // Delete dashboard schedule. + DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error + + // Delete dashboard schedule. + DeleteScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) error + + // Delete schedule subscription. + DeleteSubscription(ctx context.Context, request DeleteSubscriptionRequest) error + + // Delete schedule subscription. + DeleteSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) error + + // Get dashboard. + // + // Get a draft dashboard. + Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) + + // Get dashboard. + // + // Get a draft dashboard. + GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) + + // Get published dashboard. + // + // Get the current published dashboard. + GetPublished(ctx context.Context, request GetPublishedDashboardRequest) (*PublishedDashboard, error) + + // Get published dashboard. + // + // Get the current published dashboard. + GetPublishedByDashboardId(ctx context.Context, dashboardId string) (*PublishedDashboard, error) + + // Get dashboard schedule. + GetSchedule(ctx context.Context, request GetScheduleRequest) (*Schedule, error) + + // Get dashboard schedule. 
+ GetScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*Schedule, error) + + // Get schedule subscription. + GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) + + // Get schedule subscription. + GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) (*Subscription, error) + + // List dashboards. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] + + // List dashboards. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) + + // List dashboard schedules. + // + // This method is generated by Databricks SDK Code Generator. + ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] + + // List dashboard schedules. + // + // This method is generated by Databricks SDK Code Generator. + ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) + + // List dashboard schedules. + ListSchedulesByDashboardId(ctx context.Context, dashboardId string) (*ListSchedulesResponse, error) + + // List schedule subscriptions. + // + // This method is generated by Databricks SDK Code Generator. + ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] + + // List schedule subscriptions. + // + // This method is generated by Databricks SDK Code Generator. + ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) + + // List schedule subscriptions. + ListSubscriptionsByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*ListSubscriptionsResponse, error) + + // Migrate dashboard. + // + // Migrates a classic SQL dashboard to Lakeview. + Migrate(ctx context.Context, request MigrateDashboardRequest) (*Dashboard, error) + + // Publish dashboard. + // + // Publish the current draft dashboard. + Publish(ctx context.Context, request PublishRequest) (*PublishedDashboard, error) + + // Trash dashboard. + // + // Trash a dashboard. + Trash(ctx context.Context, request TrashDashboardRequest) error + + // Trash dashboard. + // + // Trash a dashboard. + TrashByDashboardId(ctx context.Context, dashboardId string) error + + // Unpublish dashboard. + // + // Unpublish the dashboard. + Unpublish(ctx context.Context, request UnpublishDashboardRequest) error + + // Unpublish dashboard. + // + // Unpublish the dashboard. + UnpublishByDashboardId(ctx context.Context, dashboardId string) error + + // Update dashboard. + // + // Update a draft dashboard. + Update(ctx context.Context, request UpdateDashboardRequest) (*Dashboard, error) + + // Update dashboard schedule. + UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) +} + +func NewLakeviewPreview(client *client.DatabricksClient) *LakeviewPreviewAPI { + return &LakeviewPreviewAPI{ + lakeviewPreviewImpl: lakeviewPreviewImpl{ + client: client, + }, + } +} + +// These APIs provide specific management operations for Lakeview dashboards. +// Generic resource management can be done with Workspace API (import, export, +// get-status, list, delete). +type LakeviewPreviewAPI struct { + lakeviewPreviewImpl +} + +// Delete dashboard schedule. 
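+//
+// For example (an illustrative addition, not generated code; IDs are
+// placeholders and `lakeview` stands for a client value built with
+// NewLakeviewPreviewClient):
+//
+//	err := lakeview.DeleteScheduleByDashboardIdAndScheduleId(ctx, "<dashboard-id>", "<schedule-id>")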
+func (a *LakeviewPreviewAPI) DeleteScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) error { + return a.lakeviewPreviewImpl.DeleteSchedule(ctx, DeleteScheduleRequest{ + DashboardId: dashboardId, + ScheduleId: scheduleId, + }) +} + +// Delete schedule subscription. +func (a *LakeviewPreviewAPI) DeleteSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) error { + return a.lakeviewPreviewImpl.DeleteSubscription(ctx, DeleteSubscriptionRequest{ + DashboardId: dashboardId, + ScheduleId: scheduleId, + SubscriptionId: subscriptionId, + }) +} + +// Get dashboard. +// +// Get a draft dashboard. +func (a *LakeviewPreviewAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { + return a.lakeviewPreviewImpl.Get(ctx, GetDashboardRequest{ + DashboardId: dashboardId, + }) +} + +// Get published dashboard. +// +// Get the current published dashboard. +func (a *LakeviewPreviewAPI) GetPublishedByDashboardId(ctx context.Context, dashboardId string) (*PublishedDashboard, error) { + return a.lakeviewPreviewImpl.GetPublished(ctx, GetPublishedDashboardRequest{ + DashboardId: dashboardId, + }) +} + +// Get dashboard schedule. +func (a *LakeviewPreviewAPI) GetScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*Schedule, error) { + return a.lakeviewPreviewImpl.GetSchedule(ctx, GetScheduleRequest{ + DashboardId: dashboardId, + ScheduleId: scheduleId, + }) +} + +// Get schedule subscription. +func (a *LakeviewPreviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) (*Subscription, error) { + return a.lakeviewPreviewImpl.GetSubscription(ctx, GetSubscriptionRequest{ + DashboardId: dashboardId, + ScheduleId: scheduleId, + SubscriptionId: subscriptionId, + }) +} + +// List dashboard schedules. +func (a *LakeviewPreviewAPI) ListSchedulesByDashboardId(ctx context.Context, dashboardId string) (*ListSchedulesResponse, error) { + return a.lakeviewPreviewImpl.internalListSchedules(ctx, ListSchedulesRequest{ + DashboardId: dashboardId, + }) +} + +// List schedule subscriptions. +func (a *LakeviewPreviewAPI) ListSubscriptionsByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*ListSubscriptionsResponse, error) { + return a.lakeviewPreviewImpl.internalListSubscriptions(ctx, ListSubscriptionsRequest{ + DashboardId: dashboardId, + ScheduleId: scheduleId, + }) +} + +// Trash dashboard. +// +// Trash a dashboard. +func (a *LakeviewPreviewAPI) TrashByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewPreviewImpl.Trash(ctx, TrashDashboardRequest{ + DashboardId: dashboardId, + }) +} + +// Unpublish dashboard. +// +// Unpublish the dashboard. +func (a *LakeviewPreviewAPI) UnpublishByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewPreviewImpl.Unpublish(ctx, UnpublishDashboardRequest{ + DashboardId: dashboardId, + }) +} + +type QueryExecutionPreviewInterface interface { + + // Cancel the results of a query for a published, embedded dashboard. + CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) + + // Execute a query for a published dashboard.
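+ //
+ // A sketch of the execute-then-poll flow (an illustrative addition, not
+ // generated code): the request field names are assumed to mirror
+ // CancelPublishedQueryExecutionRequest in model.go, `qe` stands for a
+ // client value built with NewQueryExecutionPreviewClient, and all names
+ // are placeholders.
+ //
+ //	err := qe.ExecutePublishedDashboardQuery(ctx, ExecutePublishedDashboardQueryRequest{
+ //		DashboardName:       "<dashboard-name>",
+ //		DashboardRevisionId: "<revision-id>",
+ //	})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	status, err := qe.PollPublishedQueryStatus(ctx, PollPublishedQueryStatusRequest{
+ //		DashboardName:       "<dashboard-name>",
+ //		DashboardRevisionId: "<revision-id>",
+ //	})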
+ ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error + + // Poll the results of a query for a published, embedded dashboard. + PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) +} + +func NewQueryExecutionPreview(client *client.DatabricksClient) *QueryExecutionPreviewAPI { + return &QueryExecutionPreviewAPI{ + queryExecutionPreviewImpl: queryExecutionPreviewImpl{ + client: client, + }, + } +} + +// Query execution APIs for AI / BI Dashboards +type QueryExecutionPreviewAPI struct { + queryExecutionPreviewImpl +} diff --git a/dashboards/v2preview/client.go b/dashboards/v2preview/client.go new file mode 100755 index 000000000..7022f0c6a --- /dev/null +++ b/dashboards/v2preview/client.go @@ -0,0 +1,147 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboardspreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type GeniePreviewClient struct { + GeniePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewGeniePreviewClient(cfg *config.Config) (*GeniePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &GeniePreviewClient{ + Config: cfg, + apiClient: apiClient, + GeniePreviewInterface: NewGeniePreview(databricksClient), + }, nil +} + +type LakeviewEmbeddedPreviewClient struct { + LakeviewEmbeddedPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewLakeviewEmbeddedPreviewClient(cfg *config.Config) (*LakeviewEmbeddedPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &LakeviewEmbeddedPreviewClient{ + Config: cfg, + apiClient: apiClient, + LakeviewEmbeddedPreviewInterface: NewLakeviewEmbeddedPreview(databricksClient), + }, nil +} + +type LakeviewPreviewClient struct { + LakeviewPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewLakeviewPreviewClient(cfg *config.Config) (*LakeviewPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err
} + + return &LakeviewPreviewClient{ + Config: cfg, + apiClient: apiClient, + LakeviewPreviewInterface: NewLakeviewPreview(databricksClient), + }, nil +} + +type QueryExecutionPreviewClient struct { + QueryExecutionPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueryExecutionPreviewClient(cfg *config.Config) (*QueryExecutionPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueryExecutionPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueryExecutionPreviewInterface: NewQueryExecutionPreview(databricksClient), + }, nil +} diff --git a/dashboards/v2preview/impl.go b/dashboards/v2preview/impl.go new file mode 100755 index 000000000..077a5bcc3 --- /dev/null +++ b/dashboards/v2preview/impl.go @@ -0,0 +1,413 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboardspreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just GeniePreview API methods +type geniePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *geniePreviewImpl) CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) { + var genieMessage GenieMessage + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages", request.SpaceId, request.ConversationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &genieMessage) + return &genieMessage, err +} + +func (a *geniePreviewImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/execute-query", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + +func (a *geniePreviewImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { + var genieMessage GenieMessage + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieMessage) + return &genieMessage, err +} + +func (a 
*geniePreviewImpl) GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/query-result", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + +func (a *geniePreviewImpl) GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/query-result/%v", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + +func (a *geniePreviewImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { + var genieStartConversationResponse GenieStartConversationResponse + path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/start-conversation", request.SpaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &genieStartConversationResponse) + return &genieStartConversationResponse, err +} + +// unexported type that holds implementations of just LakeviewEmbeddedPreview API methods +type lakeviewEmbeddedPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *lakeviewEmbeddedPreviewImpl) GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error { + var getPublishedDashboardEmbeddedResponse GetPublishedDashboardEmbeddedResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published/embedded", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardEmbeddedResponse) + return err +} + +// unexported type that holds implementations of just LakeviewPreview API methods +type lakeviewPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *lakeviewPreviewImpl) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error) { + var dashboard Dashboard + path := "/api/2.0preview/lakeview/dashboards" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Dashboard, &dashboard) + return &dashboard, err +} + +func (a *lakeviewPreviewImpl) CreateSchedule(ctx context.Context, request CreateScheduleRequest) (*Schedule, error) 
{ + var schedule Schedule + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Schedule, &schedule) + return &schedule, err +} + +func (a *lakeviewPreviewImpl) CreateSubscription(ctx context.Context, request CreateSubscriptionRequest) (*Subscription, error) { + var subscription Subscription + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Subscription, &subscription) + return &subscription, err +} + +func (a *lakeviewPreviewImpl) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { + var deleteScheduleResponse DeleteScheduleResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteScheduleResponse) + return err +} + +func (a *lakeviewPreviewImpl) DeleteSubscription(ctx context.Context, request DeleteSubscriptionRequest) error { + var deleteSubscriptionResponse DeleteSubscriptionResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteSubscriptionResponse) + return err +} + +func (a *lakeviewPreviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { + var dashboard Dashboard + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &dashboard) + return &dashboard, err +} + +func (a *lakeviewPreviewImpl) GetPublished(ctx context.Context, request GetPublishedDashboardRequest) (*PublishedDashboard, error) { + var publishedDashboard PublishedDashboard + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &publishedDashboard) + return &publishedDashboard, err +} + +func (a *lakeviewPreviewImpl) GetSchedule(ctx context.Context, request GetScheduleRequest) (*Schedule, error) { + var schedule Schedule + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &schedule) + 
return &schedule, err +} + +func (a *lakeviewPreviewImpl) GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) { + var subscription Subscription + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &subscription) + return &subscription, err +} + +// List dashboards. +func (a *lakeviewPreviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { + + getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListDashboardsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListDashboardsResponse) []Dashboard { + return resp.Dashboards + } + getNextReq := func(resp *ListDashboardsResponse) *ListDashboardsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List dashboards. +func (a *lakeviewPreviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Dashboard](ctx, iterator) +} +func (a *lakeviewPreviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { + var listDashboardsResponse ListDashboardsResponse + path := "/api/2.0preview/lakeview/dashboards" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listDashboardsResponse) + return &listDashboardsResponse, err +} + +// List dashboard schedules. +func (a *lakeviewPreviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] { + + getNextPage := func(ctx context.Context, req ListSchedulesRequest) (*ListSchedulesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSchedules(ctx, req) + } + getItems := func(resp *ListSchedulesResponse) []Schedule { + return resp.Schedules + } + getNextReq := func(resp *ListSchedulesResponse) *ListSchedulesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List dashboard schedules. 
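+//
+// A usage sketch for the paginated listing (an illustrative addition, not
+// generated code): the HasNext/Next contract comes from the listing package
+// used above, `lakeview` stands for a client value built with
+// NewLakeviewPreviewClient, and the dashboard ID is a placeholder.
+//
+//	it := lakeview.ListSchedules(ctx, ListSchedulesRequest{DashboardId: "<dashboard-id>"})
+//	for it.HasNext(ctx) {
+//		schedule, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		_ = schedule // process each item as the iterator fetches pages
+//	}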
+func (a *lakeviewPreviewImpl) ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) { + iterator := a.ListSchedules(ctx, request) + return listing.ToSlice[Schedule](ctx, iterator) +} +func (a *lakeviewPreviewImpl) internalListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { + var listSchedulesResponse ListSchedulesResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSchedulesResponse) + return &listSchedulesResponse, err +} + +// List schedule subscriptions. +func (a *lakeviewPreviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] { + + getNextPage := func(ctx context.Context, req ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSubscriptions(ctx, req) + } + getItems := func(resp *ListSubscriptionsResponse) []Subscription { + return resp.Subscriptions + } + getNextReq := func(resp *ListSubscriptionsResponse) *ListSubscriptionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List schedule subscriptions. +func (a *lakeviewPreviewImpl) ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) { + iterator := a.ListSubscriptions(ctx, request) + return listing.ToSlice[Subscription](ctx, iterator) +} +func (a *lakeviewPreviewImpl) internalListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { + var listSubscriptionsResponse ListSubscriptionsResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSubscriptionsResponse) + return &listSubscriptionsResponse, err +} + +func (a *lakeviewPreviewImpl) Migrate(ctx context.Context, request MigrateDashboardRequest) (*Dashboard, error) { + var dashboard Dashboard + path := "/api/2.0preview/lakeview/dashboards/migrate" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) + return &dashboard, err +} + +func (a *lakeviewPreviewImpl) Publish(ctx context.Context, request PublishRequest) (*PublishedDashboard, error) { + var publishedDashboard PublishedDashboard + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &publishedDashboard) + return &publishedDashboard, err +} + +func (a *lakeviewPreviewImpl) Trash(ctx context.Context, request 
TrashDashboardRequest) error { + var trashDashboardResponse TrashDashboardResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &trashDashboardResponse) + return err +} + +func (a *lakeviewPreviewImpl) Unpublish(ctx context.Context, request UnpublishDashboardRequest) error { + var unpublishDashboardResponse UnpublishDashboardResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &unpublishDashboardResponse) + return err +} + +func (a *lakeviewPreviewImpl) Update(ctx context.Context, request UpdateDashboardRequest) (*Dashboard, error) { + var dashboard Dashboard + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Dashboard, &dashboard) + return &dashboard, err +} + +func (a *lakeviewPreviewImpl) UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) { + var schedule Schedule + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request.Schedule, &schedule) + return &schedule, err +} + +// unexported type that holds implementations of just QueryExecutionPreview API methods +type queryExecutionPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queryExecutionPreviewImpl) CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) { + var cancelQueryExecutionResponse CancelQueryExecutionResponse + path := "/api/2.0preview/lakeview-query/query/published" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &cancelQueryExecutionResponse) + return &cancelQueryExecutionResponse, err +} + +func (a *queryExecutionPreviewImpl) ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error { + var executeQueryResponse ExecuteQueryResponse + path := "/api/2.0preview/lakeview-query/query/published" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &executeQueryResponse) + return err +} + +func (a *queryExecutionPreviewImpl) PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) { + var pollQueryStatusResponse PollQueryStatusResponse + path := "/api/2.0preview/lakeview-query/query/published" + queryParams 
:= make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &pollQueryStatusResponse) + return &pollQueryStatusResponse, err +} diff --git a/dashboards/v2preview/model.go b/dashboards/v2preview/model.go new file mode 100755 index 000000000..0870c63d5 --- /dev/null +++ b/dashboards/v2preview/model.go @@ -0,0 +1,1641 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package dashboardspreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +// Describes metadata for a particular chunk, within a result set; this +// structure is used both within a manifest, and when fetching individual chunk +// data or links. +type BaseChunkInfo struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount int64 `json:"byte_count,omitempty"` + // The position within the sequence of result set chunks. + ChunkIndex int `json:"chunk_index,omitempty"` + // The number of rows within the result chunk. + RowCount int64 `json:"row_count,omitempty"` + // The starting row offset within the result set. + RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BaseChunkInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BaseChunkInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Cancel the results for the a query for a published, embedded dashboard +type CancelPublishedQueryExecutionRequest struct { + DashboardName string `json:"-" url:"dashboard_name"` + + DashboardRevisionId string `json:"-" url:"dashboard_revision_id"` + // Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + Tokens []string `json:"-" url:"tokens,omitempty"` +} + +type CancelQueryExecutionResponse struct { + Status []CancelQueryExecutionResponseStatus `json:"status,omitempty"` +} + +type CancelQueryExecutionResponseStatus struct { + // The token to poll for result asynchronously Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + DataToken string `json:"data_token"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Pending *Empty `json:"pending,omitempty"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Success *Empty `json:"success,omitempty"` +} + +type ColumnInfo struct { + // The name of the column. + Name string `json:"name,omitempty"` + // The ordinal position of the column (starting at position 0). + Position int `json:"position,omitempty"` + // The format of the interval type. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // The name of the base data type. This doesn't include details for complex + // types such as STRUCT, MAP or ARRAY. + TypeName ColumnInfoTypeName `json:"type_name,omitempty"` + // Specifies the number of digits in a number. This applies to the DECIMAL + // type. + TypePrecision int `json:"type_precision,omitempty"` + // Specifies the number of digits to the right of the decimal point in a + // number. This applies to the DECIMAL type. + TypeScale int `json:"type_scale,omitempty"` + // The full SQL type specification. 
+ TypeText string `json:"type_text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The name of the base data type. This doesn't include details for complex +// types such as STRUCT, MAP or ARRAY. +type ColumnInfoTypeName string + +const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY` + +const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY` + +const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN` + +const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE` + +const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR` + +const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE` + +const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL` + +const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE` + +const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT` + +const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT` + +const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL` + +const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG` + +const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP` + +const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL` + +const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT` + +const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING` + +const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT` + +const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP` + +const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnInfoTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnInfoTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TIMESTAMP`, `USER_DEFINED_TYPE`: + *f = ColumnInfoTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TIMESTAMP", "USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface +func (f *ColumnInfoTypeName) Type() string { + return "ColumnInfoTypeName" +} + +// Create dashboard +type CreateDashboardRequest struct { + Dashboard *Dashboard `json:"dashboard,omitempty"` +} + +// Create dashboard schedule +type CreateScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId string `json:"-" url:"-"` + + Schedule *Schedule `json:"schedule,omitempty"` +} + +// Create schedule subscription +type CreateSubscriptionRequest struct { + // UUID identifying the dashboard to which the subscription belongs. + DashboardId string `json:"-" url:"-"` + // UUID identifying the schedule to which the subscription belongs. + ScheduleId string `json:"-" url:"-"` + + Subscription *Subscription `json:"subscription,omitempty"` +} + +type CronSchedule struct { + // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents + // everyday at 8am. See [Cron Trigger] for details. 
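+	//
+	// Example (illustrative sketch, not part of the generated code): a schedule
+	// that refreshes every day at 08:00 in a given timezone could be built as
+	//
+	//	cron := dashboardspreview.CronSchedule{
+	//		QuartzCronExpression: "0 0 8 * * ?",
+	//		TimezoneId:           "Europe/Amsterdam",
+	//	}
+	//
+	// and then attached to a Schedule (see the Schedule type below).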
+ // + // [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression string `json:"quartz_cron_expression"` + // A Java timezone id. The schedule will be resolved with respect to this + // timezone. See [Java TimeZone] for details. + // + // [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html + TimezoneId string `json:"timezone_id"` +} + +type Dashboard struct { + // The timestamp of when the dashboard was created. + CreateTime string `json:"create_time,omitempty"` + // UUID identifying the dashboard. + DashboardId string `json:"dashboard_id,omitempty"` + // The display name of the dashboard. + DisplayName string `json:"display_name,omitempty"` + // The etag for the dashboard. Can be optionally provided on updates to + // ensure that the dashboard has not been modified since the last read. This + // field is excluded in List Dashboards responses. + Etag string `json:"etag,omitempty"` + // The state of the dashboard resource. Used for tracking trashed status. + LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` + // The workspace path of the folder containing the dashboard. Includes + // leading slash and no trailing slash. This field is excluded in List + // Dashboards responses. + ParentPath string `json:"parent_path,omitempty"` + // The workspace path of the dashboard asset, including the file name. + // Exported dashboards always have the file extension `.lvdash.json`. This + // field is excluded in List Dashboards responses. + Path string `json:"path,omitempty"` + // The contents of the dashboard in serialized string form. This field is + // excluded in List Dashboards responses. Use the [get dashboard API] to + // retrieve an example response, which includes the `serialized_dashboard` + // field. This field provides the structure of the JSON string that + // represents the dashboard's layout and components. + // + // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get + SerializedDashboard string `json:"serialized_dashboard,omitempty"` + // The timestamp of when the dashboard was last updated by the user. This + // field is excluded in List Dashboards responses. + UpdateTime string `json:"update_time,omitempty"` + // The warehouse ID used to run the dashboard. 
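+	//
+	// Example (illustrative sketch, not part of the generated code): carrying
+	// the etag from a previous read into Update gives optimistic concurrency.
+	// "lv" stands for a hypothetical LakeviewPreview client value whose
+	// construction is not shown in this hunk:
+	//
+	//	d, err := lv.Update(ctx, dashboardspreview.UpdateDashboardRequest{
+	//		DashboardId: existing.DashboardId,
+	//		Dashboard: &dashboardspreview.Dashboard{
+	//			DisplayName: "Renamed dashboard",
+	//			Etag:        existing.Etag, // "existing" from a prior Get call
+	//		},
+	//	})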
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Dashboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Dashboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardView string + +const DashboardViewDashboardViewBasic DashboardView = `DASHBOARD_VIEW_BASIC` + +// String representation for [fmt.Print] +func (f *DashboardView) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DashboardView) Set(v string) error { + switch v { + case `DASHBOARD_VIEW_BASIC`: + *f = DashboardView(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD_VIEW_BASIC"`, v) + } +} + +// Type always returns DashboardView to satisfy [pflag.Value] interface +func (f *DashboardView) Type() string { + return "DashboardView" +} + +type DataType string + +const DataTypeDataTypeArray DataType = `DATA_TYPE_ARRAY` + +const DataTypeDataTypeBigInt DataType = `DATA_TYPE_BIG_INT` + +const DataTypeDataTypeBinary DataType = `DATA_TYPE_BINARY` + +const DataTypeDataTypeBoolean DataType = `DATA_TYPE_BOOLEAN` + +const DataTypeDataTypeDate DataType = `DATA_TYPE_DATE` + +const DataTypeDataTypeDecimal DataType = `DATA_TYPE_DECIMAL` + +const DataTypeDataTypeDouble DataType = `DATA_TYPE_DOUBLE` + +const DataTypeDataTypeFloat DataType = `DATA_TYPE_FLOAT` + +const DataTypeDataTypeInt DataType = `DATA_TYPE_INT` + +const DataTypeDataTypeInterval DataType = `DATA_TYPE_INTERVAL` + +const DataTypeDataTypeMap DataType = `DATA_TYPE_MAP` + +const DataTypeDataTypeSmallInt DataType = `DATA_TYPE_SMALL_INT` + +const DataTypeDataTypeString DataType = `DATA_TYPE_STRING` + +const DataTypeDataTypeStruct DataType = `DATA_TYPE_STRUCT` + +const DataTypeDataTypeTimestamp DataType = `DATA_TYPE_TIMESTAMP` + +const DataTypeDataTypeTinyInt DataType = `DATA_TYPE_TINY_INT` + +const DataTypeDataTypeVoid DataType = `DATA_TYPE_VOID` + +// String representation for [fmt.Print] +func (f *DataType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataType) Set(v string) error { + switch v { + case `DATA_TYPE_ARRAY`, `DATA_TYPE_BIG_INT`, `DATA_TYPE_BINARY`, `DATA_TYPE_BOOLEAN`, `DATA_TYPE_DATE`, `DATA_TYPE_DECIMAL`, `DATA_TYPE_DOUBLE`, `DATA_TYPE_FLOAT`, `DATA_TYPE_INT`, `DATA_TYPE_INTERVAL`, `DATA_TYPE_MAP`, `DATA_TYPE_SMALL_INT`, `DATA_TYPE_STRING`, `DATA_TYPE_STRUCT`, `DATA_TYPE_TIMESTAMP`, `DATA_TYPE_TINY_INT`, `DATA_TYPE_VOID`: + *f = DataType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_TYPE_ARRAY", "DATA_TYPE_BIG_INT", "DATA_TYPE_BINARY", "DATA_TYPE_BOOLEAN", "DATA_TYPE_DATE", "DATA_TYPE_DECIMAL", "DATA_TYPE_DOUBLE", "DATA_TYPE_FLOAT", "DATA_TYPE_INT", "DATA_TYPE_INTERVAL", "DATA_TYPE_MAP", "DATA_TYPE_SMALL_INT", "DATA_TYPE_STRING", "DATA_TYPE_STRUCT", "DATA_TYPE_TIMESTAMP", "DATA_TYPE_TINY_INT", "DATA_TYPE_VOID"`, v) + } +} + +// Type always returns DataType to satisfy [pflag.Value] interface +func (f *DataType) Type() string { + return "DataType" +} + +// Delete dashboard schedule +type DeleteScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId string `json:"-" url:"-"` + // The etag for the schedule. Optionally, it can be provided to verify that + // the schedule has not been modified from its last retrieval. + Etag string `json:"-" url:"etag,omitempty"` + // UUID identifying the schedule. 
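+	//
+	// Example (illustrative sketch, not part of the generated code): supplying
+	// the etag makes the delete conditional, so a schedule modified since the
+	// last read is not silently removed. The DeleteSchedule method name is
+	// assumed from this request type:
+	//
+	//	err := lv.DeleteSchedule(ctx, dashboardspreview.DeleteScheduleRequest{
+	//		DashboardId: sched.DashboardId,
+	//		ScheduleId:  sched.ScheduleId,
+	//		Etag:        sched.Etag, // "sched" from a prior GetSchedule call
+	//	})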
+	ScheduleId string `json:"-" url:"-"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteScheduleRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteScheduleRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type DeleteScheduleResponse struct {
+}
+
+// Delete schedule subscription
+type DeleteSubscriptionRequest struct {
+	// UUID identifying the dashboard to which the subscription belongs.
+	DashboardId string `json:"-" url:"-"`
+	// The etag for the subscription. Can be optionally provided to ensure that
+	// the subscription has not been modified since the last read.
+	Etag string `json:"-" url:"etag,omitempty"`
+	// UUID identifying the schedule to which the subscription belongs.
+	ScheduleId string `json:"-" url:"-"`
+	// UUID identifying the subscription.
+	SubscriptionId string `json:"-" url:"-"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteSubscriptionRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteSubscriptionRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type DeleteSubscriptionResponse struct {
+}
+
+// Represents an empty message, similar to google.protobuf.Empty, which is not
+// available in the firm right now.
+type Empty struct {
+}
+
+// Execute query request for published Dashboards. Since published dashboards
+// have the option of running as the publisher, the datasets and warehouse_id
+// are excluded from the request and instead read from the source
+// (lakeview-config) via the additional parameters (dashboardName and
+// dashboardRevisionId).
+type ExecutePublishedDashboardQueryRequest struct {
+	// The dashboard name and revision_id are required to retrieve
+	// PublishedDatasetDataModel, which contains the list of datasets,
+	// warehouse_id, and embedded_credentials.
+	DashboardName string `json:"dashboard_name"`
+
+	DashboardRevisionId string `json:"dashboard_revision_id"`
+	// A dashboard schedule can override the warehouse used as compute for
+	// processing the published dashboard queries.
+	OverrideWarehouseId string `json:"override_warehouse_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ExecutePublishedDashboardQueryRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ExecutePublishedDashboardQueryRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ExecuteQueryResponse struct {
+}
+
+type ExternalLink struct {
+	// The number of bytes in the result chunk. This field is not available when
+	// using `INLINE` disposition.
+	ByteCount int64 `json:"byte_count,omitempty"`
+	// The position within the sequence of result set chunks.
+	ChunkIndex int `json:"chunk_index,omitempty"`
+	// Indicates the date-time that the given external link will expire and
+	// become invalid, after which point a new `external_link` must be
+	// requested.
+	Expiration string `json:"expiration,omitempty"`
+
+	ExternalLink string `json:"external_link,omitempty"`
+	// HTTP headers that must be included with a GET request to the
+	// `external_link`. Each header is provided as a key-value pair. Headers are
+	// typically used to pass a decryption key to the external service. The
+	// values of these headers should be considered sensitive and the client
+	// should not expose these values in a log.
+	HttpHeaders map[string]string `json:"http_headers,omitempty"`
+	// When fetching, provides the `chunk_index` for the _next_ chunk.
If + // absent, indicates there are no more chunks. The next chunk can be fetched + // with a :method:statementexecution/getStatementResultChunkN request. + NextChunkIndex int `json:"next_chunk_index,omitempty"` + // When fetching, provides a link to fetch the _next_ chunk. If absent, + // indicates there are no more chunks. This link is an absolute `path` to be + // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque + // link. This is an alternative to using `next_chunk_index`. + NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"` + // The number of rows within the result chunk. + RowCount int64 `json:"row_count,omitempty"` + // The starting row offset within the result set. + RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalLink) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalLink) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Format string + +const FormatArrowStream Format = `ARROW_STREAM` + +const FormatCsv Format = `CSV` + +const FormatJsonArray Format = `JSON_ARRAY` + +// String representation for [fmt.Print] +func (f *Format) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Format) Set(v string) error { + switch v { + case `ARROW_STREAM`, `CSV`, `JSON_ARRAY`: + *f = Format(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARROW_STREAM", "CSV", "JSON_ARRAY"`, v) + } +} + +// Type always returns Format to satisfy [pflag.Value] interface +func (f *Format) Type() string { + return "Format" +} + +// Genie AI Response +type GenieAttachment struct { + Query *QueryAttachment `json:"query,omitempty"` + + Text *TextAttachment `json:"text,omitempty"` +} + +type GenieConversation struct { + // Timestamp when the message was created + CreatedTimestamp int64 `json:"created_timestamp,omitempty"` + // Conversation ID + Id string `json:"id"` + // Timestamp when the message was last updated + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Genie space ID + SpaceId string `json:"space_id"` + // Conversation title + Title string `json:"title"` + // ID of the user who created the conversation + UserId int `json:"user_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *GenieConversation) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieConversation) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GenieCreateConversationMessageRequest struct { + // User message content. + Content string `json:"content"` + // The ID associated with the conversation. + ConversationId string `json:"-" url:"-"` + // The ID associated with the Genie space where the conversation is started. + SpaceId string `json:"-" url:"-"` +} + +// Execute SQL query in a conversation message +type GenieExecuteMessageQueryRequest struct { + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Genie space ID + SpaceId string `json:"-" url:"-"` +} + +// Get conversation message +type GenieGetConversationMessageRequest struct { + // The ID associated with the target conversation. + ConversationId string `json:"-" url:"-"` + // The ID associated with the target message from the identified + // conversation. + MessageId string `json:"-" url:"-"` + // The ID associated with the Genie space where the target conversation is + // located. 
+	SpaceId string `json:"-" url:"-"`
+}
+
+// Get conversation message SQL query result
+type GenieGetMessageQueryResultRequest struct {
+	// Conversation ID
+	ConversationId string `json:"-" url:"-"`
+	// Message ID
+	MessageId string `json:"-" url:"-"`
+	// Genie space ID
+	SpaceId string `json:"-" url:"-"`
+}
+
+type GenieGetMessageQueryResultResponse struct {
+	// SQL Statement Execution response. See [Get status, manifest, and result
+	// first chunk](:method:statementexecution/getstatement) for more details.
+	StatementResponse *StatementResponse `json:"statement_response,omitempty"`
+}
+
+// Get conversation message SQL query result by attachment id
+type GenieGetQueryResultByAttachmentRequest struct {
+	// Attachment ID
+	AttachmentId string `json:"-" url:"-"`
+	// Conversation ID
+	ConversationId string `json:"-" url:"-"`
+	// Message ID
+	MessageId string `json:"-" url:"-"`
+	// Genie space ID
+	SpaceId string `json:"-" url:"-"`
+}
+
+type GenieMessage struct {
+	// AI produced response to the message
+	Attachments []GenieAttachment `json:"attachments,omitempty"`
+	// User message content
+	Content string `json:"content"`
+	// Conversation ID
+	ConversationId string `json:"conversation_id"`
+	// Timestamp when the message was created
+	CreatedTimestamp int64 `json:"created_timestamp,omitempty"`
+	// Error message if AI failed to respond to the message
+	Error *MessageError `json:"error,omitempty"`
+	// Message ID
+	Id string `json:"id"`
+	// Timestamp when the message was last updated
+	LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"`
+	// The result of the SQL query if the message has a query attachment
+	QueryResult *Result `json:"query_result,omitempty"`
+	// Genie space ID
+	SpaceId string `json:"space_id"`
+	// MessageStatus. The possible values are: * `FETCHING_METADATA`: Fetching
+	// metadata from the data sources. * `FILTERING_CONTEXT`: Running smart
+	// context step to determine relevant context. * `ASKING_AI`: Waiting for
+	// the LLM to respond to the user's question. * `PENDING_WAREHOUSE`: Waiting
+	// for warehouse before the SQL query can start executing. *
+	// `EXECUTING_QUERY`: Executing AI provided SQL query. Get the SQL query
+	// result by calling
+	// [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
+	// **Important: The message status will stay in the `EXECUTING_QUERY` until
+	// a client calls
+	// [getMessageQueryResult](:method:genie/getMessageQueryResult)**. *
+	// `FAILED`: Generating a response or executing the query failed. Please
+	// see the `error` field. * `COMPLETED`: Message processing is completed.
+	// Results are in the `attachments` field. Get the SQL query result by
+	// calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
+	// * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL
+	// result is not available anymore. The user needs to execute the query
+	// again. * `CANCELLED`: Message has been cancelled.
+	Status MessageStatus `json:"status,omitempty"`
+	// ID of the user who created the message
+	UserId int64 `json:"user_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GenieMessage) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GenieMessage) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type GenieStartConversationMessageRequest struct {
+	// The text of the message that starts the conversation.
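+	//
+	// Example (illustrative sketch, not part of the generated code): starting a
+	// conversation and keeping the identifiers needed for follow-up calls.
+	// "genie" is a hypothetical client value and StartConversation an assumed
+	// method name matching this request type:
+	//
+	//	resp, err := genie.StartConversation(ctx,
+	//		dashboardspreview.GenieStartConversationMessageRequest{
+	//			SpaceId: spaceID, // hypothetical variable
+	//			Content: "Which products sold best last week?",
+	//		})
+	//	// resp.ConversationId and resp.MessageId drive later polling calls.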
+	Content string `json:"content"`
+	// The ID associated with the Genie space where you want to start a
+	// conversation.
+	SpaceId string `json:"-" url:"-"`
+}
+
+type GenieStartConversationResponse struct {
+	Conversation *GenieConversation `json:"conversation,omitempty"`
+	// Conversation ID
+	ConversationId string `json:"conversation_id"`
+
+	Message *GenieMessage `json:"message,omitempty"`
+	// Message ID
+	MessageId string `json:"message_id"`
+}
+
+// Get dashboard
+type GetDashboardRequest struct {
+	// UUID identifying the dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+// Read a published dashboard in an embedded UI.
+type GetPublishedDashboardEmbeddedRequest struct {
+	// UUID identifying the published dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+type GetPublishedDashboardEmbeddedResponse struct {
+}
+
+// Get published dashboard
+type GetPublishedDashboardRequest struct {
+	// UUID identifying the published dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+// Get dashboard schedule
+type GetScheduleRequest struct {
+	// UUID identifying the dashboard to which the schedule belongs.
+	DashboardId string `json:"-" url:"-"`
+	// UUID identifying the schedule.
+	ScheduleId string `json:"-" url:"-"`
+}
+
+// Get schedule subscription
+type GetSubscriptionRequest struct {
+	// UUID identifying the dashboard to which the subscription belongs.
+	DashboardId string `json:"-" url:"-"`
+	// UUID identifying the schedule to which the subscription belongs.
+	ScheduleId string `json:"-" url:"-"`
+	// UUID identifying the subscription.
+	SubscriptionId string `json:"-" url:"-"`
+}
+
+type LifecycleState string
+
+const LifecycleStateActive LifecycleState = `ACTIVE`
+
+const LifecycleStateTrashed LifecycleState = `TRASHED`
+
+// String representation for [fmt.Print]
+func (f *LifecycleState) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *LifecycleState) Set(v string) error {
+	switch v {
+	case `ACTIVE`, `TRASHED`:
+		*f = LifecycleState(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "ACTIVE", "TRASHED"`, v)
+	}
+}
+
+// Type always returns LifecycleState to satisfy [pflag.Value] interface
+func (f *LifecycleState) Type() string {
+	return "LifecycleState"
+}
+
+// List dashboards
+type ListDashboardsRequest struct {
+	// The number of dashboards to return per page.
+	PageSize int `json:"-" url:"page_size,omitempty"`
+	// A page token, received from a previous `ListDashboards` call. This token
+	// can be used to retrieve the subsequent page.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+	// The flag to include dashboards located in the trash. If unspecified, only
+	// active dashboards will be returned.
+	ShowTrashed bool `json:"-" url:"show_trashed,omitempty"`
+	// `DASHBOARD_VIEW_BASIC` only includes summary metadata from the dashboard.
+	View DashboardView `json:"-" url:"view,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListDashboardsRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListDashboardsResponse struct {
+	Dashboards []Dashboard `json:"dashboards,omitempty"`
+	// A token, which can be sent as `page_token` to retrieve the next page. If
+	// this field is omitted, there are no subsequent dashboards.
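+	//
+	// Example (illustrative sketch, not part of the generated code): the
+	// generated iterators in impl.go (see ListSchedules there) feed
+	// next_page_token back as page_token automatically; an assumed
+	// ListDashboards iterator would be consumed like this:
+	//
+	//	it := lv.ListDashboards(ctx, dashboardspreview.ListDashboardsRequest{PageSize: 100})
+	//	for it.HasNext(ctx) {
+	//		d, err := it.Next(ctx)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		fmt.Println(d.DisplayName)
+	//	}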
+	NextPageToken string `json:"next_page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListDashboardsResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListDashboardsResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// List dashboard schedules
+type ListSchedulesRequest struct {
+	// UUID identifying the dashboard to which the schedules belong.
+	DashboardId string `json:"-" url:"-"`
+	// The number of schedules to return per page.
+	PageSize int `json:"-" url:"page_size,omitempty"`
+	// A page token, received from a previous `ListSchedules` call. Use this to
+	// retrieve the subsequent page.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListSchedulesRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListSchedulesRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListSchedulesResponse struct {
+	// A token that can be used as a `page_token` in subsequent requests to
+	// retrieve the next page of results. If this field is omitted, there are no
+	// subsequent schedules.
+	NextPageToken string `json:"next_page_token,omitempty"`
+
+	Schedules []Schedule `json:"schedules,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListSchedulesResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListSchedulesResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// List schedule subscriptions
+type ListSubscriptionsRequest struct {
+	// UUID identifying the dashboard to which the subscriptions belong.
+	DashboardId string `json:"-" url:"-"`
+	// The number of subscriptions to return per page.
+	PageSize int `json:"-" url:"page_size,omitempty"`
+	// A page token, received from a previous `ListSubscriptions` call. Use this
+	// to retrieve the subsequent page.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+	// UUID identifying the schedule to which the subscriptions belong.
+	ScheduleId string `json:"-" url:"-"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListSubscriptionsRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListSubscriptionsRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListSubscriptionsResponse struct {
+	// A token that can be used as a `page_token` in subsequent requests to
+	// retrieve the next page of results. If this field is omitted, there are no
+	// subsequent subscriptions.
+ NextPageToken string `json:"next_page_token,omitempty"` + + Subscriptions []Subscription `json:"subscriptions,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSubscriptionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSubscriptionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MessageError struct { + Error string `json:"error,omitempty"` + + Type MessageErrorType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MessageError) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MessageError) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MessageErrorType string + +const MessageErrorTypeBlockMultipleExecutionsException MessageErrorType = `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION` + +const MessageErrorTypeChatCompletionClientException MessageErrorType = `CHAT_COMPLETION_CLIENT_EXCEPTION` + +const MessageErrorTypeChatCompletionClientTimeoutException MessageErrorType = `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION` + +const MessageErrorTypeChatCompletionNetworkException MessageErrorType = `CHAT_COMPLETION_NETWORK_EXCEPTION` + +const MessageErrorTypeContentFilterException MessageErrorType = `CONTENT_FILTER_EXCEPTION` + +const MessageErrorTypeContextExceededException MessageErrorType = `CONTEXT_EXCEEDED_EXCEPTION` + +const MessageErrorTypeCouldNotGetUcSchemaException MessageErrorType = `COULD_NOT_GET_UC_SCHEMA_EXCEPTION` + +const MessageErrorTypeDeploymentNotFoundException MessageErrorType = `DEPLOYMENT_NOT_FOUND_EXCEPTION` + +const MessageErrorTypeFunctionsNotAvailableException MessageErrorType = `FUNCTIONS_NOT_AVAILABLE_EXCEPTION` + +const MessageErrorTypeFunctionArgumentsInvalidException MessageErrorType = `FUNCTION_ARGUMENTS_INVALID_EXCEPTION` + +const MessageErrorTypeFunctionArgumentsInvalidJsonException MessageErrorType = `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION` + +const MessageErrorTypeFunctionCallMissingParameterException MessageErrorType = `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION` + +const MessageErrorTypeGenericChatCompletionException MessageErrorType = `GENERIC_CHAT_COMPLETION_EXCEPTION` + +const MessageErrorTypeGenericChatCompletionServiceException MessageErrorType = `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION` + +const MessageErrorTypeGenericSqlExecApiCallException MessageErrorType = `GENERIC_SQL_EXEC_API_CALL_EXCEPTION` + +const MessageErrorTypeIllegalParameterDefinitionException MessageErrorType = `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION` + +const MessageErrorTypeInvalidCertifiedAnswerFunctionException MessageErrorType = `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION` + +const MessageErrorTypeInvalidCertifiedAnswerIdentifierException MessageErrorType = `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION` + +const MessageErrorTypeInvalidChatCompletionJsonException MessageErrorType = `INVALID_CHAT_COMPLETION_JSON_EXCEPTION` + +const MessageErrorTypeInvalidCompletionRequestException MessageErrorType = `INVALID_COMPLETION_REQUEST_EXCEPTION` + +const MessageErrorTypeInvalidFunctionCallException MessageErrorType = `INVALID_FUNCTION_CALL_EXCEPTION` + +const MessageErrorTypeInvalidTableIdentifierException MessageErrorType = `INVALID_TABLE_IDENTIFIER_EXCEPTION` + +const MessageErrorTypeLocalContextExceededException MessageErrorType = `LOCAL_CONTEXT_EXCEEDED_EXCEPTION` + +const MessageErrorTypeMessageDeletedWhileExecutingException MessageErrorType = `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION` + +const 
MessageErrorTypeMessageUpdatedWhileExecutingException MessageErrorType = `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION` + +const MessageErrorTypeNoDeploymentsAvailableToWorkspace MessageErrorType = `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE` + +const MessageErrorTypeNoQueryToVisualizeException MessageErrorType = `NO_QUERY_TO_VISUALIZE_EXCEPTION` + +const MessageErrorTypeNoTablesToQueryException MessageErrorType = `NO_TABLES_TO_QUERY_EXCEPTION` + +const MessageErrorTypeRateLimitExceededGenericException MessageErrorType = `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION` + +const MessageErrorTypeRateLimitExceededSpecifiedWaitException MessageErrorType = `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION` + +const MessageErrorTypeReplyProcessTimeoutException MessageErrorType = `REPLY_PROCESS_TIMEOUT_EXCEPTION` + +const MessageErrorTypeRetryableProcessingException MessageErrorType = `RETRYABLE_PROCESSING_EXCEPTION` + +const MessageErrorTypeSqlExecutionException MessageErrorType = `SQL_EXECUTION_EXCEPTION` + +const MessageErrorTypeStopProcessDueToAutoRegenerate MessageErrorType = `STOP_PROCESS_DUE_TO_AUTO_REGENERATE` + +const MessageErrorTypeTablesMissingException MessageErrorType = `TABLES_MISSING_EXCEPTION` + +const MessageErrorTypeTooManyCertifiedAnswersException MessageErrorType = `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION` + +const MessageErrorTypeTooManyTablesException MessageErrorType = `TOO_MANY_TABLES_EXCEPTION` + +const MessageErrorTypeUnexpectedReplyProcessException MessageErrorType = `UNEXPECTED_REPLY_PROCESS_EXCEPTION` + +const MessageErrorTypeUnknownAiModel MessageErrorType = `UNKNOWN_AI_MODEL` + +const MessageErrorTypeWarehouseAccessMissingException MessageErrorType = `WAREHOUSE_ACCESS_MISSING_EXCEPTION` + +const MessageErrorTypeWarehouseNotFoundException MessageErrorType = `WAREHOUSE_NOT_FOUND_EXCEPTION` + +// String representation for [fmt.Print] +func (f *MessageErrorType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MessageErrorType) Set(v string) error { + switch v { + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, 
`WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`:
+		*f = MessageErrorType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v)
+	}
+}
+
+// Type always returns MessageErrorType to satisfy [pflag.Value] interface
+func (f *MessageErrorType) Type() string {
+	return "MessageErrorType"
+}
+
+// MessageStatus. The possible values are: * `FETCHING_METADATA`: Fetching
+// metadata from the data sources. * `FILTERING_CONTEXT`: Running smart context
+// step to determine relevant context. * `ASKING_AI`: Waiting for the LLM to
+// respond to the user's question. * `PENDING_WAREHOUSE`: Waiting for warehouse
+// before the SQL query can start executing. * `EXECUTING_QUERY`: Executing AI
+// provided SQL query. Get the SQL query result by calling
+// [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
+// **Important: The message status will stay in the `EXECUTING_QUERY` until a
+// client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**.
+// * `FAILED`: Generating a response or executing the query failed. Please see
+// the `error` field. * `COMPLETED`: Message processing is completed. Results
+// are in the `attachments` field. Get the SQL query result by calling
+// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. *
+// `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result
+// is not available anymore. The user needs to execute the query again. *
+// `CANCELLED`: Message has been cancelled.
+type MessageStatus string
+
+// Waiting for the LLM to respond to the user's question.
+const MessageStatusAskingAi MessageStatus = `ASKING_AI`
+
+// Message has been cancelled.
+const MessageStatusCancelled MessageStatus = `CANCELLED`
+
+// Message processing is completed. Results are in the `attachments` field. Get
+// the SQL query result by calling
+// [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
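+//
+// Example (illustrative sketch, not part of the generated code): a typical
+// polling loop branches on the in-flight states; GetMessage and
+// GetMessageQueryResult are assumed method names matching the request types in
+// this file:
+//
+//	msg, err := genie.GetMessage(ctx, getReq) // hypothetical request value
+//	switch msg.Status {
+//	case dashboardspreview.MessageStatusExecutingQuery:
+//		// call GetMessageQueryResult; the status stays EXECUTING_QUERY until then
+//	case dashboardspreview.MessageStatusFailed:
+//		// inspect msg.Error
+//	case dashboardspreview.MessageStatusCompleted:
+//		// results are in msg.Attachments
+//	default:
+//		// still in flight, poll again after a delay
+//	}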
+const MessageStatusCompleted MessageStatus = `COMPLETED`
+
+// Executing AI provided SQL query. Get the SQL query result by calling
+// [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
+// **Important: The message status will stay in the `EXECUTING_QUERY` until a
+// client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**.
+const MessageStatusExecutingQuery MessageStatus = `EXECUTING_QUERY`
+
+// Generating a response or executing the query failed. Please see the `error`
+// field.
+const MessageStatusFailed MessageStatus = `FAILED`
+
+// Fetching metadata from the data sources.
+const MessageStatusFetchingMetadata MessageStatus = `FETCHING_METADATA`
+
+// Running smart context step to determine relevant context.
+const MessageStatusFilteringContext MessageStatus = `FILTERING_CONTEXT`
+
+// Waiting for warehouse before the SQL query can start executing.
+const MessageStatusPendingWarehouse MessageStatus = `PENDING_WAREHOUSE`
+
+// SQL result is not available anymore. The user needs to execute the query
+// again.
+const MessageStatusQueryResultExpired MessageStatus = `QUERY_RESULT_EXPIRED`
+
+// Message has been submitted.
+const MessageStatusSubmitted MessageStatus = `SUBMITTED`
+
+// String representation for [fmt.Print]
+func (f *MessageStatus) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *MessageStatus) Set(v string) error {
+	switch v {
+	case `ASKING_AI`, `CANCELLED`, `COMPLETED`, `EXECUTING_QUERY`, `FAILED`, `FETCHING_METADATA`, `FILTERING_CONTEXT`, `PENDING_WAREHOUSE`, `QUERY_RESULT_EXPIRED`, `SUBMITTED`:
+		*f = MessageStatus(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "ASKING_AI", "CANCELLED", "COMPLETED", "EXECUTING_QUERY", "FAILED", "FETCHING_METADATA", "FILTERING_CONTEXT", "PENDING_WAREHOUSE", "QUERY_RESULT_EXPIRED", "SUBMITTED"`, v)
+	}
+}
+
+// Type always returns MessageStatus to satisfy [pflag.Value] interface
+func (f *MessageStatus) Type() string {
+	return "MessageStatus"
+}
+
+type MigrateDashboardRequest struct {
+	// Display name for the new Lakeview dashboard.
+	DisplayName string `json:"display_name,omitempty"`
+	// The workspace path of the folder to contain the migrated Lakeview
+	// dashboard.
+	ParentPath string `json:"parent_path,omitempty"`
+	// UUID of the dashboard to be migrated.
+	SourceDashboardId string `json:"source_dashboard_id"`
+	// Flag to indicate if mustache parameter syntax ({{ param }}) should be
+	// auto-updated to named syntax (:param) when converting datasets in the
+	// dashboard.
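+	//
+	// Example (illustrative sketch, not part of the generated code): migrating
+	// a legacy DBSQL dashboard into Lakeview while rewriting {{ param }}
+	// placeholders to the named :param syntax:
+	//
+	//	d, err := lv.Migrate(ctx, dashboardspreview.MigrateDashboardRequest{
+	//		SourceDashboardId:     sourceID, // hypothetical variable
+	//		DisplayName:           "Sales (migrated)",
+	//		ParentPath:            "/Workspace/Dashboards",
+	//		UpdateParameterSyntax: true,
+	//	})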
+	UpdateParameterSyntax bool `json:"update_parameter_syntax,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *MigrateDashboardRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s MigrateDashboardRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type PendingStatus struct {
+	// The token to poll for the result asynchronously. Example:
+	// EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ
+	DataToken string `json:"data_token"`
+}
+
+// Poll the results for a query for a published, embedded dashboard
+type PollPublishedQueryStatusRequest struct {
+	DashboardName string `json:"-" url:"dashboard_name"`
+
+	DashboardRevisionId string `json:"-" url:"dashboard_revision_id"`
+	// Example:
+	// EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ
+	Tokens []string `json:"-" url:"tokens,omitempty"`
+}
+
+type PollQueryStatusResponse struct {
+	Data []PollQueryStatusResponseData `json:"data,omitempty"`
+}
+
+type PollQueryStatusResponseData struct {
+	Status QueryResponseStatus `json:"status"`
+}
+
+type PublishRequest struct {
+	// UUID identifying the dashboard to be published.
+	DashboardId string `json:"-" url:"-"`
+	// Flag to indicate if the publisher's credentials should be embedded in the
+	// published dashboard. These embedded credentials will be used to execute
+	// the published dashboard's queries.
+	EmbedCredentials bool `json:"embed_credentials,omitempty"`
+	// The ID of the warehouse that can be used to override the warehouse which
+	// was set in the draft.
+	WarehouseId string `json:"warehouse_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *PublishRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s PublishRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type PublishedDashboard struct {
+	// The display name of the published dashboard.
+	DisplayName string `json:"display_name,omitempty"`
+	// Indicates whether credentials are embedded in the published dashboard.
+	EmbedCredentials bool `json:"embed_credentials,omitempty"`
+	// The timestamp of when the published dashboard was last revised.
+	RevisionCreateTime string `json:"revision_create_time,omitempty"`
+	// The warehouse ID used to run the published dashboard.
+	WarehouseId string `json:"warehouse_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *PublishedDashboard) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s PublishedDashboard) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type QueryAttachment struct {
+	CachedQuerySchema *QuerySchema `json:"cached_query_schema,omitempty"`
+	// Description of the query
+	Description string `json:"description,omitempty"`
+
+	Id string `json:"id,omitempty"`
+	// If the query was created from an instruction (trusted asset), we link to
+	// the id
+	InstructionId string `json:"instruction_id,omitempty"`
+	// Always store the title next to the id in case the original instruction
+	// title changes or the instruction is deleted.
+ InstructionTitle string `json:"instruction_title,omitempty"` + // Time when the user updated the query last + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // AI generated SQL query + Query string `json:"query,omitempty"` + + StatementId string `json:"statement_id,omitempty"` + // Name of the query + Title string `json:"title,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryAttachment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryAttachment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryResponseStatus struct { + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Canceled *Empty `json:"canceled,omitempty"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Closed *Empty `json:"closed,omitempty"` + + Pending *PendingStatus `json:"pending,omitempty"` + // The statement id in format(01eef5da-c56e-1f36-bafa-21906587d6ba) The + // statement_id should be identical to data_token in SuccessStatus and + // PendingStatus. This field is created for audit logging purpose to record + // the statement_id of all QueryResponseStatus. + StatementId string `json:"statement_id,omitempty"` + + Success *SuccessStatus `json:"success,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryResponseStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryResponseStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QuerySchema struct { + Columns []QuerySchemaColumn `json:"columns,omitempty"` + // Used to determine if the stored query schema is compatible with the + // latest run. The service should always clear the schema when the query is + // re-executed. + StatementId string `json:"statement_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QuerySchema) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QuerySchema) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QuerySchemaColumn struct { + // Populated from + // https://docs.databricks.com/sql/language-manual/sql-ref-datatypes.html + DataType DataType `json:"data_type"` + + Name string `json:"name"` + // Corresponds to type desc + TypeText string `json:"type_text"` +} + +type Result struct { + // If result is truncated + IsTruncated bool `json:"is_truncated,omitempty"` + // Row count of the result + RowCount int64 `json:"row_count,omitempty"` + // Statement Execution API statement id. Use [Get status, manifest, and + // result first chunk](:method:statementexecution/getstatement) to get the + // full result data. + StatementId string `json:"statement_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Result) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Result) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ResultData struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount int64 `json:"byte_count,omitempty"` + // The position within the sequence of result set chunks. + ChunkIndex int `json:"chunk_index,omitempty"` + // The `JSON_ARRAY` format is an array of arrays of values, where each + // non-null value is formatted as a string. Null values are encoded as JSON + // `null`. 
+ DataArray [][]string `json:"data_array,omitempty"` + + ExternalLinks []ExternalLink `json:"external_links,omitempty"` + // When fetching, provides the `chunk_index` for the _next_ chunk. If + // absent, indicates there are no more chunks. The next chunk can be fetched + // with a :method:statementexecution/getStatementResultChunkN request. + NextChunkIndex int `json:"next_chunk_index,omitempty"` + // When fetching, provides a link to fetch the _next_ chunk. If absent, + // indicates there are no more chunks. This link is an absolute `path` to be + // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque + // link. This is an alternative to using `next_chunk_index`. + NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"` + // The number of rows within the result chunk. + RowCount int64 `json:"row_count,omitempty"` + // The starting row offset within the result set. + RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultData) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultData) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The result manifest provides schema and metadata for the result set. +type ResultManifest struct { + // Array of result set chunk metadata. + Chunks []BaseChunkInfo `json:"chunks,omitempty"` + + Format Format `json:"format,omitempty"` + // The schema is an ordered list of column descriptions. + Schema *ResultSchema `json:"schema,omitempty"` + // The total number of bytes in the result set. This field is not available + // when using `INLINE` disposition. + TotalByteCount int64 `json:"total_byte_count,omitempty"` + // The total number of chunks that the result set has been divided into. + TotalChunkCount int `json:"total_chunk_count,omitempty"` + // The total number of rows in the result set. + TotalRowCount int64 `json:"total_row_count,omitempty"` + // Indicates whether the result is truncated due to `row_limit` or + // `byte_limit`. + Truncated bool `json:"truncated,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultManifest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultManifest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The schema is an ordered list of column descriptions. +type ResultSchema struct { + ColumnCount int `json:"column_count,omitempty"` + + Columns []ColumnInfo `json:"columns,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultSchema) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultSchema) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Schedule struct { + // A timestamp indicating when the schedule was created. + CreateTime string `json:"create_time,omitempty"` + // The cron expression describing the frequency of the periodic refresh for + // this schedule. + CronSchedule CronSchedule `json:"cron_schedule"` + // UUID identifying the dashboard to which the schedule belongs. + DashboardId string `json:"dashboard_id,omitempty"` + // The display name for schedule. + DisplayName string `json:"display_name,omitempty"` + // The etag for the schedule. Must be left empty on create, must be provided + // on updates to ensure that the schedule has not been modified since the + // last read, and can be optionally provided on delete. + Etag string `json:"etag,omitempty"` + // The status indicates whether this schedule is paused or not. 
+ PauseStatus SchedulePauseStatus `json:"pause_status,omitempty"` + // UUID identifying the schedule. + ScheduleId string `json:"schedule_id,omitempty"` + // A timestamp indicating when the schedule was last updated. + UpdateTime string `json:"update_time,omitempty"` + // The warehouse id to run the dashboard with for the schedule. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Schedule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Schedule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SchedulePauseStatus string + +const SchedulePauseStatusPaused SchedulePauseStatus = `PAUSED` + +const SchedulePauseStatusUnpaused SchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *SchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = SchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns SchedulePauseStatus to satisfy [pflag.Value] interface +func (f *SchedulePauseStatus) Type() string { + return "SchedulePauseStatus" +} + +type ServiceError struct { + ErrorCode ServiceErrorCode `json:"error_code,omitempty"` + // A brief summary of the error condition. + Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServiceError) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServiceError) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServiceErrorCode string + +const ServiceErrorCodeAborted ServiceErrorCode = `ABORTED` + +const ServiceErrorCodeAlreadyExists ServiceErrorCode = `ALREADY_EXISTS` + +const ServiceErrorCodeBadRequest ServiceErrorCode = `BAD_REQUEST` + +const ServiceErrorCodeCancelled ServiceErrorCode = `CANCELLED` + +const ServiceErrorCodeDeadlineExceeded ServiceErrorCode = `DEADLINE_EXCEEDED` + +const ServiceErrorCodeInternalError ServiceErrorCode = `INTERNAL_ERROR` + +const ServiceErrorCodeIoError ServiceErrorCode = `IO_ERROR` + +const ServiceErrorCodeNotFound ServiceErrorCode = `NOT_FOUND` + +const ServiceErrorCodeResourceExhausted ServiceErrorCode = `RESOURCE_EXHAUSTED` + +const ServiceErrorCodeServiceUnderMaintenance ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE` + +const ServiceErrorCodeTemporarilyUnavailable ServiceErrorCode = `TEMPORARILY_UNAVAILABLE` + +const ServiceErrorCodeUnauthenticated ServiceErrorCode = `UNAUTHENTICATED` + +const ServiceErrorCodeUnknown ServiceErrorCode = `UNKNOWN` + +const ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *ServiceErrorCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServiceErrorCode) Set(v string) error { + switch v { + case `ABORTED`, `ALREADY_EXISTS`, `BAD_REQUEST`, `CANCELLED`, `DEADLINE_EXCEEDED`, `INTERNAL_ERROR`, `IO_ERROR`, `NOT_FOUND`, `RESOURCE_EXHAUSTED`, `SERVICE_UNDER_MAINTENANCE`, `TEMPORARILY_UNAVAILABLE`, `UNAUTHENTICATED`, `UNKNOWN`, `WORKSPACE_TEMPORARILY_UNAVAILABLE`: + *f = ServiceErrorCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABORTED", "ALREADY_EXISTS", "BAD_REQUEST", "CANCELLED", "DEADLINE_EXCEEDED", 
"INTERNAL_ERROR", "IO_ERROR", "NOT_FOUND", "RESOURCE_EXHAUSTED", "SERVICE_UNDER_MAINTENANCE", "TEMPORARILY_UNAVAILABLE", "UNAUTHENTICATED", "UNKNOWN", "WORKSPACE_TEMPORARILY_UNAVAILABLE"`, v) + } +} + +// Type always returns ServiceErrorCode to satisfy [pflag.Value] interface +func (f *ServiceErrorCode) Type() string { + return "ServiceErrorCode" +} + +type StatementResponse struct { + // The result manifest provides schema and metadata for the result set. + Manifest *ResultManifest `json:"manifest,omitempty"` + + Result *ResultData `json:"result,omitempty"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId string `json:"statement_id,omitempty"` + // The status response includes execution state and if relevant, error + // information. + Status *StatementStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StatementResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StatementResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: +// running - `SUCCEEDED`: execution was successful, result data available for +// fetch - `FAILED`: execution failed; reason for failure described in +// accomanying error message - `CANCELED`: user canceled; can come from explicit +// cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution +// successful, and statement closed; result no longer available for fetch +type StatementState string + +// user canceled; can come from explicit cancel call, or timeout with +// `on_wait_timeout=CANCEL` +const StatementStateCanceled StatementState = `CANCELED` + +// execution successful, and statement closed; result no longer available for +// fetch +const StatementStateClosed StatementState = `CLOSED` + +// execution failed; reason for failure described in accomanying error message +const StatementStateFailed StatementState = `FAILED` + +// waiting for warehouse +const StatementStatePending StatementState = `PENDING` + +// running +const StatementStateRunning StatementState = `RUNNING` + +// execution was successful, result data available for fetch +const StatementStateSucceeded StatementState = `SUCCEEDED` + +// String representation for [fmt.Print] +func (f *StatementState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *StatementState) Set(v string) error { + switch v { + case `CANCELED`, `CLOSED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCEEDED`: + *f = StatementState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "CLOSED", "FAILED", "PENDING", "RUNNING", "SUCCEEDED"`, v) + } +} + +// Type always returns StatementState to satisfy [pflag.Value] interface +func (f *StatementState) Type() string { + return "StatementState" +} + +// The status response includes execution state and if relevant, error +// information. 
+type StatementStatus struct {
+	Error *ServiceError `json:"error,omitempty"`
+	// Statement execution state: - `PENDING`: waiting for warehouse -
+	// `RUNNING`: running - `SUCCEEDED`: execution was successful, result data
+	// available for fetch - `FAILED`: execution failed; reason for failure
+	// described in accompanying error message - `CANCELED`: user canceled; can
+	// come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`
+	// - `CLOSED`: execution successful, and statement closed; result no longer
+	// available for fetch
+	State StatementState `json:"state,omitempty"`
+}
+
+type Subscriber struct {
+	// The destination to receive the subscription email. This parameter is
+	// mutually exclusive with `user_subscriber`.
+	DestinationSubscriber *SubscriptionSubscriberDestination `json:"destination_subscriber,omitempty"`
+	// The user to receive the subscription email. This parameter is mutually
+	// exclusive with `destination_subscriber`.
+	UserSubscriber *SubscriptionSubscriberUser `json:"user_subscriber,omitempty"`
+}
+
+type Subscription struct {
+	// A timestamp indicating when the subscription was created.
+	CreateTime string `json:"create_time,omitempty"`
+	// UserId of the user who adds subscribers (users or notification
+	// destinations) to the dashboard's schedule.
+	CreatedByUserId int64 `json:"created_by_user_id,omitempty"`
+	// UUID identifying the dashboard to which the subscription belongs.
+	DashboardId string `json:"dashboard_id,omitempty"`
+	// The etag for the subscription. Must be left empty on create; it can
+	// optionally be provided on delete to ensure that the subscription has not
+	// been deleted since the last read.
+	Etag string `json:"etag,omitempty"`
+	// UUID identifying the schedule to which the subscription belongs.
+	ScheduleId string `json:"schedule_id,omitempty"`
+	// Subscriber details for users and destinations to be added as subscribers
+	// to the schedule.
+	Subscriber Subscriber `json:"subscriber"`
+	// UUID identifying the subscription.
+	SubscriptionId string `json:"subscription_id,omitempty"`
+	// A timestamp indicating when the subscription was last updated.
+	UpdateTime string `json:"update_time,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Subscription) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Subscription) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type SubscriptionSubscriberDestination struct {
+	// The canonical identifier of the destination to receive email
+	// notification.
+	DestinationId string `json:"destination_id"`
+}
+
+type SubscriptionSubscriberUser struct {
+	// UserId of the subscriber.
+	UserId int64 `json:"user_id"`
+}
+
+type SuccessStatus struct {
+	// The token to poll for the result asynchronously. Example:
+	// EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ
+	DataToken string `json:"data_token"`
+	// Whether the query result is truncated (either by byte limit or row
+	// limit).
+	Truncated bool `json:"truncated,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *SuccessStatus) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SuccessStatus) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type TextAttachment struct {
+	// AI-generated message
+	Content string `json:"content,omitempty"`
+
+	Id string `json:"id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *TextAttachment) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s TextAttachment) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Trash dashboard
+type TrashDashboardRequest struct {
+	// UUID identifying the dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+type TrashDashboardResponse struct {
+}
+
+// Unpublish dashboard
+type UnpublishDashboardRequest struct {
+	// UUID identifying the published dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+type UnpublishDashboardResponse struct {
+}
+
+// Update dashboard
+type UpdateDashboardRequest struct {
+	Dashboard *Dashboard `json:"dashboard,omitempty"`
+	// UUID identifying the dashboard.
+	DashboardId string `json:"-" url:"-"`
+}
+
+// Update dashboard schedule
+type UpdateScheduleRequest struct {
+	// UUID identifying the dashboard to which the schedule belongs.
+	DashboardId string `json:"-" url:"-"`
+
+	Schedule *Schedule `json:"schedule,omitempty"`
+	// UUID identifying the schedule.
+	ScheduleId string `json:"-" url:"-"`
+}
diff --git a/files/v2preview/api.go b/files/v2preview/api.go
new file mode 100755
index 000000000..1410f5134
--- /dev/null
+++ b/files/v2preview/api.go
@@ -0,0 +1,479 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+// These APIs allow you to manage Dbfs Preview, Files Preview, etc.
+package filespreview
+
+import (
+	"context"
+
+	"github.com/databricks/databricks-sdk-go/databricks/client"
+	"github.com/databricks/databricks-sdk-go/databricks/listing"
+)
+
+type DbfsPreviewInterface interface {
+
+	// Append data block.
+	//
+	// Appends a block of data to the stream specified by the input handle. If the
+	// handle does not exist, this call will throw an exception with
+	// ``RESOURCE_DOES_NOT_EXIST``.
+	//
+	// If the block of data exceeds 1 MB, this call will throw an exception with
+	// ``MAX_BLOCK_SIZE_EXCEEDED``.
+	AddBlock(ctx context.Context, request AddBlock) error
+
+	// Close the stream.
+	//
+	// Closes the stream specified by the input handle. If the handle does not
+	// exist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``.
+	Close(ctx context.Context, request Close) error
+
+	// Close the stream.
+	//
+	// Closes the stream specified by the input handle. If the handle does not
+	// exist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``.
+	CloseByHandle(ctx context.Context, handle int64) error
+
+	// Open a stream.
+	//
+	// Opens a stream to write to a file and returns a handle to this stream. There
+	// is a 10-minute idle timeout on this handle.
If a file or directory already + // exists on the given path and __overwrite__ is set to false, this call will + // throw an exception with ``RESOURCE_ALREADY_EXISTS``. + // + // A typical workflow for file upload would be: + // + // 1. Issue a ``create`` call and get a handle. 2. Issue one or more + // ``add-block`` calls with the handle you have. 3. Issue a ``close`` call with + // the handle you have. + Create(ctx context.Context, request Create) (*CreateResponse, error) + + // Delete a file/directory. + // + // Delete the file or directory (optionally recursively delete all files in the + // directory). This call throws an exception with `IO_ERROR` if the path is a + // non-empty directory and `recursive` is set to `false` or on other similar + // errors. + // + // When you delete a large number of files, the delete operation is done in + // increments. The call returns a response after approximately 45 seconds with + // an error message (503 Service Unavailable) asking you to re-invoke the delete + // operation until the directory structure is fully deleted. + // + // For operations that delete more than 10K files, we discourage using the DBFS + // REST API, but advise you to perform such operations in the context of a + // cluster, using the [File system utility + // (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs). `dbutils.fs` + // covers the functional scope of the DBFS REST API, but from notebooks. Running + // such operations using notebooks provides better control and manageability, + // such as selective deletes, and the possibility to automate periodic delete + // jobs. + Delete(ctx context.Context, request Delete) error + + // Get the information of a file or directory. + // + // Gets the file information for a file or directory. If the file or directory + // does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. + GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error) + + // Get the information of a file or directory. + // + // Gets the file information for a file or directory. If the file or directory + // does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. + GetStatusByPath(ctx context.Context, path string) (*FileInfo, error) + + // List directory contents or file details. + // + // List the contents of a directory, or details of the file. If the file or + // directory does not exist, this call throws an exception with + // `RESOURCE_DOES_NOT_EXIST`. + // + // When calling list on a large directory, the list operation will time out + // after approximately 60 seconds. We strongly recommend using list only on + // directories containing less than 10K files and discourage using the DBFS REST + // API for operations that list more than 10K files. Instead, we recommend that + // you perform such operations in the context of a cluster, using the [File + // system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), + // which provides the same functionality without timing out. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] + + // List directory contents or file details. + // + // List the contents of a directory, or details of the file. If the file or + // directory does not exist, this call throws an exception with + // `RESOURCE_DOES_NOT_EXIST`. + // + // When calling list on a large directory, the list operation will time out + // after approximately 60 seconds. 
We strongly recommend using list only on
+	// directories containing less than 10K files and discourage using the DBFS REST
+	// API for operations that list more than 10K files. Instead, we recommend that
+	// you perform such operations in the context of a cluster, using the [File
+	// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs),
+	// which provides the same functionality without timing out.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error)
+
+	// List directory contents or file details.
+	//
+	// List the contents of a directory, or details of the file. If the file or
+	// directory does not exist, this call throws an exception with
+	// `RESOURCE_DOES_NOT_EXIST`.
+	//
+	// When calling list on a large directory, the list operation will time out
+	// after approximately 60 seconds. We strongly recommend using list only on
+	// directories containing less than 10K files and discourage using the DBFS REST
+	// API for operations that list more than 10K files. Instead, we recommend that
+	// you perform such operations in the context of a cluster, using the [File
+	// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs),
+	// which provides the same functionality without timing out.
+	ListByPath(ctx context.Context, path string) (*ListStatusResponse, error)
+
+	// Create a directory.
+	//
+	// Creates the given directory and necessary parent directories if they do not
+	// exist. If a file (not a directory) exists at any prefix of the input path,
+	// this call throws an exception with `RESOURCE_ALREADY_EXISTS`. **Note**: If
+	// this operation fails, it might have succeeded in creating some of the
+	// necessary parent directories.
+	Mkdirs(ctx context.Context, request MkDirs) error
+
+	// Create a directory.
+	//
+	// Creates the given directory and necessary parent directories if they do not
+	// exist. If a file (not a directory) exists at any prefix of the input path,
+	// this call throws an exception with `RESOURCE_ALREADY_EXISTS`. **Note**: If
+	// this operation fails, it might have succeeded in creating some of the
+	// necessary parent directories.
+	MkdirsByPath(ctx context.Context, path string) error
+
+	// Move a file.
+	//
+	// Moves a file from one location to another location within DBFS. If the source
+	// file does not exist, this call throws an exception with
+	// `RESOURCE_DOES_NOT_EXIST`. If a file already exists in the destination path,
+	// this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If the given
+	// source path is a directory, this call always recursively moves all files.
+	Move(ctx context.Context, request Move) error
+
+	// Upload a file.
+	//
+	// Uploads a file through the use of multipart form post. It is mainly used for
+	// streaming uploads, but can also be used as a convenient single call for data
+	// upload.
+	//
+	// Alternatively, you can pass contents as a base64 string.
+	//
+	// The amount of data that can be passed (when not streaming) using the
+	// __contents__ parameter is limited to 1 MB. `MAX_BLOCK_SIZE_EXCEEDED` will be
+	// thrown if this limit is exceeded.
+	//
+	// If you want to upload large files, use the streaming upload. For details, see
+	// :method:dbfs/create, :method:dbfs/addBlock, :method:dbfs/close.
+	Put(ctx context.Context, request Put) error
+
+	// Get the contents of a file.
+	//
+	// Returns the contents of a file.
If the file does not exist, this call throws
+	// an exception with `RESOURCE_DOES_NOT_EXIST`. If the path is a directory, the
+	// read length is negative, or if the offset is negative, this call throws an
+	// exception with `INVALID_PARAMETER_VALUE`. If the read length exceeds 1 MB,
+	// this call throws an exception with `MAX_READ_SIZE_EXCEEDED`.
+	//
+	// If `offset + length` exceeds the number of bytes in a file, it reads the
+	// contents until the end of file.
+	Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error)
+}
+
+func NewDbfsPreview(client *client.DatabricksClient) *DbfsPreviewAPI {
+	return &DbfsPreviewAPI{
+		dbfsPreviewImpl: dbfsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The DBFS API makes it simple to interact with various data sources without having
+// to include a user's credentials every time to read a file.
+type DbfsPreviewAPI struct {
+	dbfsPreviewImpl
+}
+
+// Close the stream.
+//
+// Closes the stream specified by the input handle. If the handle does not
+// exist, this call throws an exception with ``RESOURCE_DOES_NOT_EXIST``.
+func (a *DbfsPreviewAPI) CloseByHandle(ctx context.Context, handle int64) error {
+	return a.dbfsPreviewImpl.Close(ctx, Close{
+		Handle: handle,
+	})
+}
+
+// Get the information of a file or directory.
+//
+// Gets the file information for a file or directory. If the file or directory
+// does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`.
+func (a *DbfsPreviewAPI) GetStatusByPath(ctx context.Context, path string) (*FileInfo, error) {
+	return a.dbfsPreviewImpl.GetStatus(ctx, GetStatusRequest{
+		Path: path,
+	})
+}
+
+// List directory contents or file details.
+//
+// List the contents of a directory, or details of the file. If the file or
+// directory does not exist, this call throws an exception with
+// `RESOURCE_DOES_NOT_EXIST`.
+//
+// When calling list on a large directory, the list operation will time out
+// after approximately 60 seconds. We strongly recommend using list only on
+// directories containing less than 10K files and discourage using the DBFS REST
+// API for operations that list more than 10K files. Instead, we recommend that
+// you perform such operations in the context of a cluster, using the [File
+// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs),
+// which provides the same functionality without timing out.
+func (a *DbfsPreviewAPI) ListByPath(ctx context.Context, path string) (*ListStatusResponse, error) {
+	return a.dbfsPreviewImpl.internalList(ctx, ListDbfsRequest{
+		Path: path,
+	})
+}
+
+// Create a directory.
+//
+// Creates the given directory and necessary parent directories if they do not
+// exist. If a file (not a directory) exists at any prefix of the input path,
+// this call throws an exception with `RESOURCE_ALREADY_EXISTS`. **Note**: If
+// this operation fails, it might have succeeded in creating some of the
+// necessary parent directories.
+func (a *DbfsPreviewAPI) MkdirsByPath(ctx context.Context, path string) error {
+	return a.dbfsPreviewImpl.Mkdirs(ctx, MkDirs{
+		Path: path,
+	})
+}
+
+type FilesPreviewInterface interface {
+
+	// Create a directory.
+	//
+	// Creates an empty directory. If necessary, also creates any parent directories
+	// of the new, empty directory (like the shell command `mkdir -p`). If called on
+	// an existing directory, returns a success response; this method is idempotent
+	// (it will succeed if the directory already exists).
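+	//
+	// A minimal call sketch (illustrative only; assumes a configured
+	// FilesPreviewAPI named files and a Unity Catalog volume path):
+	//
+	//	err := files.CreateDirectory(ctx, CreateDirectoryRequest{
+	//		DirectoryPath: "/Volumes/main/default/my_volume/new_dir",
+	//	})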
+	CreateDirectory(ctx context.Context, request CreateDirectoryRequest) error
+
+	// Delete a file.
+	//
+	// Deletes a file. If the request is successful, there is no response body.
+	Delete(ctx context.Context, request DeleteFileRequest) error
+
+	// Delete a file.
+	//
+	// Deletes a file. If the request is successful, there is no response body.
+	DeleteByFilePath(ctx context.Context, filePath string) error
+
+	// Delete a directory.
+	//
+	// Deletes an empty directory.
+	//
+	// To delete a non-empty directory, first delete all of its contents. This can
+	// be done by listing the directory contents and deleting each file and
+	// subdirectory recursively.
+	DeleteDirectory(ctx context.Context, request DeleteDirectoryRequest) error
+
+	// Delete a directory.
+	//
+	// Deletes an empty directory.
+	//
+	// To delete a non-empty directory, first delete all of its contents. This can
+	// be done by listing the directory contents and deleting each file and
+	// subdirectory recursively.
+	DeleteDirectoryByDirectoryPath(ctx context.Context, directoryPath string) error
+
+	// Download a file.
+	//
+	// Downloads a file. The file contents are the response body. This is a standard
+	// HTTP file download, not a JSON RPC. It supports the Range and
+	// If-Unmodified-Since HTTP headers.
+	Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error)
+
+	// Download a file.
+	//
+	// Downloads a file. The file contents are the response body. This is a standard
+	// HTTP file download, not a JSON RPC. It supports the Range and
+	// If-Unmodified-Since HTTP headers.
+	DownloadByFilePath(ctx context.Context, filePath string) (*DownloadResponse, error)
+
+	// Get directory metadata.
+	//
+	// Get the metadata of a directory. The response HTTP headers contain the
+	// metadata. There is no response body.
+	//
+	// This method is useful to check if a directory exists and the caller has
+	// access to it.
+	//
+	// If you wish to ensure the directory exists, you can instead use `PUT`, which
+	// will create the directory if it does not exist, and is idempotent (it will
+	// succeed if the directory already exists).
+	GetDirectoryMetadata(ctx context.Context, request GetDirectoryMetadataRequest) error
+
+	// Get directory metadata.
+	//
+	// Get the metadata of a directory. The response HTTP headers contain the
+	// metadata. There is no response body.
+	//
+	// This method is useful to check if a directory exists and the caller has
+	// access to it.
+	//
+	// If you wish to ensure the directory exists, you can instead use `PUT`, which
+	// will create the directory if it does not exist, and is idempotent (it will
+	// succeed if the directory already exists).
+	GetDirectoryMetadataByDirectoryPath(ctx context.Context, directoryPath string) error
+
+	// Get file metadata.
+	//
+	// Get the metadata of a file. The response HTTP headers contain the metadata.
+	// There is no response body.
+	GetMetadata(ctx context.Context, request GetMetadataRequest) (*GetMetadataResponse, error)
+
+	// Get file metadata.
+	//
+	// Get the metadata of a file. The response HTTP headers contain the metadata.
+	// There is no response body.
+	GetMetadataByFilePath(ctx context.Context, filePath string) (*GetMetadataResponse, error)
+
+	// List directory contents.
+	//
+	// Returns the contents of a directory. If there is no directory at the
+	// specified path, the API returns an HTTP 404 error.
+	//
+	// This method is generated by Databricks SDK Code Generator.
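+	//
+	// A consumption sketch (illustrative only; assumes a configured
+	// FilesPreviewAPI named files and the HasNext/Next contract of
+	// listing.Iterator):
+	//
+	//	it := files.ListDirectoryContents(ctx, ListDirectoryContentsRequest{
+	//		DirectoryPath: "/Volumes/main/default/my_volume",
+	//	})
+	//	for it.HasNext(ctx) {
+	//		entry, err := it.Next(ctx)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		fmt.Println(entry.Path)
+	//	}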
+	ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry]
+
+	// List directory contents.
+	//
+	// Returns the contents of a directory. If there is no directory at the
+	// specified path, the API returns an HTTP 404 error.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error)
+
+	// List directory contents.
+	//
+	// Returns the contents of a directory. If there is no directory at the
+	// specified path, the API returns an HTTP 404 error.
+	ListDirectoryContentsByDirectoryPath(ctx context.Context, directoryPath string) (*ListDirectoryResponse, error)
+
+	// Upload a file.
+	//
+	// Uploads a file of up to 5 GiB. The file contents should be sent as the
+	// request body as raw bytes (an octet stream); do not encode or otherwise
+	// modify the bytes before sending. The contents of the resulting file will be
+	// exactly the bytes sent in the request body. If the request is successful,
+	// there is no response body.
+	Upload(ctx context.Context, request UploadRequest) error
+}
+
+func NewFilesPreview(client *client.DatabricksClient) *FilesPreviewAPI {
+	return &FilesPreviewAPI{
+		filesPreviewImpl: filesPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The Files API is a standard HTTP API that allows you to read, write, list,
+// and delete files and directories by referring to their URI. The API makes
+// working with file content as raw bytes easier and more efficient.
+//
+// The API supports [Unity Catalog volumes], where files and directories to
+// operate on are specified using their volume URI path, which follows the
+// format
+// /Volumes/<catalog_name>/<schema_name>/<volume_name>/<path_to_file>.
+//
+// The Files API has two distinct endpoints, one for working with files
+// (`/fs/files`) and another for working with directories
+// (`/fs/directories`). Both endpoints use the standard HTTP methods GET, HEAD,
+// PUT, and DELETE to manage files and directories specified using their URI
+// path. The path is always absolute.
+//
+// Some Files API client features are currently experimental. To enable them,
+// set `enable_experimental_files_api_client = True` in your configuration
+// profile or use the environment variable
+// `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`.
+//
+// [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html
+type FilesPreviewAPI struct {
+	filesPreviewImpl
+}
+
+// Delete a file.
+//
+// Deletes a file. If the request is successful, there is no response body.
+func (a *FilesPreviewAPI) DeleteByFilePath(ctx context.Context, filePath string) error {
+	return a.filesPreviewImpl.Delete(ctx, DeleteFileRequest{
+		FilePath: filePath,
+	})
+}
+
+// Delete a directory.
+//
+// Deletes an empty directory.
+//
+// To delete a non-empty directory, first delete all of its contents. This can
+// be done by listing the directory contents and deleting each file and
+// subdirectory recursively.
+func (a *FilesPreviewAPI) DeleteDirectoryByDirectoryPath(ctx context.Context, directoryPath string) error {
+	return a.filesPreviewImpl.DeleteDirectory(ctx, DeleteDirectoryRequest{
+		DirectoryPath: directoryPath,
+	})
+}
+
+// Download a file.
+//
+// Downloads a file. The file contents are the response body. This is a standard
+// HTTP file download, not a JSON RPC. It supports the Range and
+// If-Unmodified-Since HTTP headers.
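+//
+// A download sketch (illustrative only; assumes a configured FilesPreviewAPI
+// named files and a file at the given volume path):
+//
+//	resp, err := files.DownloadByFilePath(ctx, "/Volumes/main/default/my_volume/report.csv")
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Contents.Close()
+//	data, err := io.ReadAll(resp.Contents)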
+func (a *FilesPreviewAPI) DownloadByFilePath(ctx context.Context, filePath string) (*DownloadResponse, error) { + return a.filesPreviewImpl.Download(ctx, DownloadRequest{ + FilePath: filePath, + }) +} + +// Get directory metadata. +// +// Get the metadata of a directory. The response HTTP headers contain the +// metadata. There is no response body. +// +// This method is useful to check if a directory exists and the caller has +// access to it. +// +// If you wish to ensure the directory exists, you can instead use `PUT`, which +// will create the directory if it does not exist, and is idempotent (it will +// succeed if the directory already exists). +func (a *FilesPreviewAPI) GetDirectoryMetadataByDirectoryPath(ctx context.Context, directoryPath string) error { + return a.filesPreviewImpl.GetDirectoryMetadata(ctx, GetDirectoryMetadataRequest{ + DirectoryPath: directoryPath, + }) +} + +// Get file metadata. +// +// Get the metadata of a file. The response HTTP headers contain the metadata. +// There is no response body. +func (a *FilesPreviewAPI) GetMetadataByFilePath(ctx context.Context, filePath string) (*GetMetadataResponse, error) { + return a.filesPreviewImpl.GetMetadata(ctx, GetMetadataRequest{ + FilePath: filePath, + }) +} + +// List directory contents. +// +// Returns the contents of a directory. If there is no directory at the +// specified path, the API returns a HTTP 404 error. +func (a *FilesPreviewAPI) ListDirectoryContentsByDirectoryPath(ctx context.Context, directoryPath string) (*ListDirectoryResponse, error) { + return a.filesPreviewImpl.internalListDirectoryContents(ctx, ListDirectoryContentsRequest{ + DirectoryPath: directoryPath, + }) +} diff --git a/files/v2preview/client.go b/files/v2preview/client.go new file mode 100755 index 000000000..61044cd64 --- /dev/null +++ b/files/v2preview/client.go @@ -0,0 +1,79 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package filespreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type DbfsPreviewClient struct { + DbfsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDbfsPreviewClient(cfg *config.Config) (*DbfsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DbfsPreviewClient{ + Config: cfg, + apiClient: apiClient, + DbfsPreviewInterface: NewDbfsPreview(databricksClient), + }, nil +} + +type FilesPreviewClient struct { + FilesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewFilesPreviewClient(cfg *config.Config) (*FilesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &FilesPreviewClient{ + Config: cfg, + apiClient: apiClient, + FilesPreviewInterface: NewFilesPreview(databricksClient), + }, nil +} diff --git a/files/v2preview/impl.go b/files/v2preview/impl.go new file mode 100755 index 000000000..39613fcbe --- /dev/null +++ b/files/v2preview/impl.go @@ -0,0 +1,295 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package filespreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" + "golang.org/x/exp/slices" +) + +// unexported type that holds implementations of just DbfsPreview API methods +type dbfsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *dbfsPreviewImpl) AddBlock(ctx context.Context, request AddBlock) error { + var addBlockResponse AddBlockResponse + path := "/api/2.0preview/dbfs/add-block" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addBlockResponse) + return err +} + +func (a *dbfsPreviewImpl) Close(ctx context.Context, request Close) error { + var closeResponse CloseResponse + path := "/api/2.0preview/dbfs/close" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &closeResponse) + return err +} + +func (a *dbfsPreviewImpl) Create(ctx context.Context, request Create) (*CreateResponse, error) { + var createResponse CreateResponse + path := "/api/2.0preview/dbfs/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) + return &createResponse, err +} + +func (a *dbfsPreviewImpl) Delete(ctx context.Context, request Delete) error { + var deleteResponse DeleteResponse + path := "/api/2.0preview/dbfs/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *dbfsPreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error) { + var fileInfo FileInfo + path := "/api/2.0preview/dbfs/get-status" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &fileInfo) + return &fileInfo, err +} + +// List directory contents or file details. +// +// List the contents of a directory, or details of the file. If the file or +// directory does not exist, this call throws an exception with +// `RESOURCE_DOES_NOT_EXIST`. +// +// When calling list on a large directory, the list operation will time out +// after approximately 60 seconds. We strongly recommend using list only on +// directories containing less than 10K files and discourage using the DBFS REST +// API for operations that list more than 10K files. Instead, we recommend that +// you perform such operations in the context of a cluster, using the [File +// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), +// which provides the same functionality without timing out. 
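+//
+// For small directories, the ListAll wrapper collects the same results into a
+// slice. A usage sketch (illustrative only; assumes a configured
+// DbfsPreviewAPI named dbfs):
+//
+//	infos, err := dbfs.ListAll(ctx, ListDbfsRequest{Path: "/tmp"})
+//	if err != nil {
+//		return err
+//	}
+//	for _, fi := range infos {
+//		fmt.Println(fi.Path, fi.FileSize)
+//	}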
+func (a *dbfsPreviewImpl) List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListDbfsRequest) (*ListStatusResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListStatusResponse) []FileInfo { + return resp.Files + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List directory contents or file details. +// +// List the contents of a directory, or details of the file. If the file or +// directory does not exist, this call throws an exception with +// `RESOURCE_DOES_NOT_EXIST`. +// +// When calling list on a large directory, the list operation will time out +// after approximately 60 seconds. We strongly recommend using list only on +// directories containing less than 10K files and discourage using the DBFS REST +// API for operations that list more than 10K files. Instead, we recommend that +// you perform such operations in the context of a cluster, using the [File +// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), +// which provides the same functionality without timing out. +func (a *dbfsPreviewImpl) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} +func (a *dbfsPreviewImpl) internalList(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { + var listStatusResponse ListStatusResponse + path := "/api/2.0preview/dbfs/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listStatusResponse) + return &listStatusResponse, err +} + +func (a *dbfsPreviewImpl) Mkdirs(ctx context.Context, request MkDirs) error { + var mkDirsResponse MkDirsResponse + path := "/api/2.0preview/dbfs/mkdirs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &mkDirsResponse) + return err +} + +func (a *dbfsPreviewImpl) Move(ctx context.Context, request Move) error { + var moveResponse MoveResponse + path := "/api/2.0preview/dbfs/move" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &moveResponse) + return err +} + +func (a *dbfsPreviewImpl) Put(ctx context.Context, request Put) error { + var putResponse PutResponse + path := "/api/2.0preview/dbfs/put" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putResponse) + return err +} + +func (a *dbfsPreviewImpl) Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error) { + var readResponse ReadResponse + path := "/api/2.0preview/dbfs/read" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &readResponse) + 
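+	// Note: ReadResponse.Data is returned base64-encoded, and BytesRead is the
+	// decoded (unencoded) byte count, which can be less than the requested
+	// length at end of file.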
return &readResponse, err
+}
+
+// unexported type that holds implementations of just FilesPreview API methods
+type filesPreviewImpl struct {
+	client *client.DatabricksClient
+}
+
+func (a *filesPreviewImpl) CreateDirectory(ctx context.Context, request CreateDirectoryRequest) error {
+	var createDirectoryResponse CreateDirectoryResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, nil, &createDirectoryResponse)
+	return err
+}
+
+func (a *filesPreviewImpl) Delete(ctx context.Context, request DeleteFileRequest) error {
+	var deleteResponse DeleteResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse)
+	return err
+}
+
+func (a *filesPreviewImpl) DeleteDirectory(ctx context.Context, request DeleteDirectoryRequest) error {
+	var deleteDirectoryResponse DeleteDirectoryResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDirectoryResponse)
+	return err
+}
+
+func (a *filesPreviewImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) {
+	var downloadResponse DownloadResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/octet-stream"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &downloadResponse)
+	return &downloadResponse, err
+}
+
+func (a *filesPreviewImpl) GetDirectoryMetadata(ctx context.Context, request GetDirectoryMetadataRequest) error {
+	var getDirectoryMetadataResponse GetDirectoryMetadataResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	err := a.client.Do(ctx, http.MethodHead, path, headers, queryParams, request, &getDirectoryMetadataResponse)
+	return err
+}
+
+func (a *filesPreviewImpl) GetMetadata(ctx context.Context, request GetMetadataRequest) (*GetMetadataResponse, error) {
+	var getMetadataResponse GetMetadataResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	err := a.client.Do(ctx, http.MethodHead, path, headers, queryParams, request, &getMetadataResponse)
+	return &getMetadataResponse, err
+}
+
+// List directory contents.
+//
+// Returns the contents of a directory. If there is no directory at the
+// specified path, the API returns an HTTP 404 error.
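+//
+// The iterator returned below automates token-based pagination. Done by hand,
+// the loop it replaces would look roughly like this (sketch only; the volume
+// path is illustrative):
+//
+//	req := ListDirectoryContentsRequest{DirectoryPath: "/Volumes/main/default/my_volume"}
+//	for {
+//		resp, err := a.internalListDirectoryContents(ctx, req)
+//		if err != nil {
+//			return err
+//		}
+//		// consume resp.Contents here
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		req.PageToken = resp.NextPageToken
+//	}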
+func (a *filesPreviewImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry] {
+
+	getNextPage := func(ctx context.Context, req ListDirectoryContentsRequest) (*ListDirectoryResponse, error) {
+		ctx = useragent.InContext(ctx, "sdk-feature", "pagination")
+		return a.internalListDirectoryContents(ctx, req)
+	}
+	getItems := func(resp *ListDirectoryResponse) []DirectoryEntry {
+		return resp.Contents
+	}
+	getNextReq := func(resp *ListDirectoryResponse) *ListDirectoryContentsRequest {
+		if resp.NextPageToken == "" {
+			return nil
+		}
+		request.PageToken = resp.NextPageToken
+		return &request
+	}
+	iterator := listing.NewIterator(
+		&request,
+		getNextPage,
+		getItems,
+		getNextReq)
+	return iterator
+}
+
+// List directory contents.
+//
+// Returns the contents of a directory. If there is no directory at the
+// specified path, the API returns an HTTP 404 error.
+func (a *filesPreviewImpl) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) {
+	iterator := a.ListDirectoryContents(ctx, request)
+	return listing.ToSliceN[DirectoryEntry, int64](ctx, iterator, request.PageSize)
+}
+
+func (a *filesPreviewImpl) internalListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) {
+	var listDirectoryResponse ListDirectoryResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath))
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listDirectoryResponse)
+	return &listDirectoryResponse, err
+}
+
+func (a *filesPreviewImpl) Upload(ctx context.Context, request UploadRequest) error {
+	var uploadResponse UploadResponse
+	path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath))
+	queryParams := make(map[string]any)
+	if request.Overwrite != false || slices.Contains(request.ForceSendFields, "Overwrite") {
+		queryParams["overwrite"] = request.Overwrite
+	}
+	headers := make(map[string]string)
+	headers["Content-Type"] = "application/octet-stream"
+	err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request.Contents, &uploadResponse)
+	return err
+}
diff --git a/files/v2preview/model.go b/files/v2preview/model.go
new file mode 100755
index 000000000..867f52e44
--- /dev/null
+++ b/files/v2preview/model.go
@@ -0,0 +1,388 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package filespreview
+
+import (
+	"io"
+
+	"github.com/databricks/databricks-sdk-go/databricks/marshal"
+)
+
+type AddBlock struct {
+	// The base64-encoded data to append to the stream. This has a limit of 1
+	// MB.
+	Data string `json:"data"`
+	// The handle on an open stream.
+	Handle int64 `json:"handle"`
+}
+
+type AddBlockResponse struct {
+}
+
+type Close struct {
+	// The handle on an open stream.
+	Handle int64 `json:"handle"`
+}
+
+type CloseResponse struct {
+}
+
+type Create struct {
+	// The flag that specifies whether to overwrite existing file/files.
+	Overwrite bool `json:"overwrite,omitempty"`
+	// The path of the new file. The path should be the absolute DBFS path.
+ Path string `json:"path"` + + ForceSendFields []string `json:"-"` +} + +func (s *Create) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Create) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Create a directory +type CreateDirectoryRequest struct { + // The absolute path of a directory. + DirectoryPath string `json:"-" url:"-"` +} + +type CreateDirectoryResponse struct { +} + +type CreateResponse struct { + // Handle which should subsequently be passed into the AddBlock and Close + // calls when writing to a file through a stream. + Handle int64 `json:"handle,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Delete struct { + // The path of the file or directory to delete. The path should be the + // absolute DBFS path. + Path string `json:"path"` + // Whether or not to recursively delete the directory's contents. Deleting + // empty directories can be done without providing the recursive flag. + Recursive bool `json:"recursive,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Delete) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Delete) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a directory +type DeleteDirectoryRequest struct { + // The absolute path of a directory. + DirectoryPath string `json:"-" url:"-"` +} + +type DeleteDirectoryResponse struct { +} + +// Delete a file +type DeleteFileRequest struct { + // The absolute path of the file. + FilePath string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +type DirectoryEntry struct { + // The length of the file in bytes. This field is omitted for directories. + FileSize int64 `json:"file_size,omitempty"` + // True if the path is a directory. + IsDirectory bool `json:"is_directory,omitempty"` + // Last modification time of given file in milliseconds since unix epoch. + LastModified int64 `json:"last_modified,omitempty"` + // The name of the file or directory. This is the last component of the + // path. + Name string `json:"name,omitempty"` + // The absolute path of the file or directory. + Path string `json:"path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DirectoryEntry) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DirectoryEntry) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Download a file +type DownloadRequest struct { + // The absolute path of the file. + FilePath string `json:"-" url:"-"` +} + +type DownloadResponse struct { + ContentLength int64 `json:"-" url:"-" header:"content-length,omitempty"` + + ContentType string `json:"-" url:"-" header:"content-type,omitempty"` + + Contents io.ReadCloser `json:"-"` + + LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DownloadResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DownloadResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FileInfo struct { + // The length of the file in bytes. This field is omitted for directories. + FileSize int64 `json:"file_size,omitempty"` + // True if the path is a directory. + IsDir bool `json:"is_dir,omitempty"` + // Last modification time of given file in milliseconds since epoch. 
+ ModificationTime int64 `json:"modification_time,omitempty"` + // The absolute path of the file or directory. + Path string `json:"path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FileInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FileInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get directory metadata +type GetDirectoryMetadataRequest struct { + // The absolute path of a directory. + DirectoryPath string `json:"-" url:"-"` +} + +type GetDirectoryMetadataResponse struct { +} + +// Get file metadata +type GetMetadataRequest struct { + // The absolute path of the file. + FilePath string `json:"-" url:"-"` +} + +type GetMetadataResponse struct { + ContentLength int64 `json:"-" url:"-" header:"content-length,omitempty"` + + ContentType string `json:"-" url:"-" header:"content-type,omitempty"` + + LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetMetadataResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetMetadataResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the information of a file or directory +type GetStatusRequest struct { + // The path of the file or directory. The path should be the absolute DBFS + // path. + Path string `json:"-" url:"path"` +} + +// List directory contents or file details +type ListDbfsRequest struct { + // The path of the file or directory. The path should be the absolute DBFS + // path. + Path string `json:"-" url:"path"` +} + +// List directory contents +type ListDirectoryContentsRequest struct { + // The absolute path of a directory. + DirectoryPath string `json:"-" url:"-"` + // The maximum number of directory entries to return. The response may + // contain fewer entries. If the response contains a `next_page_token`, + // there may be more entries, even if fewer than `page_size` entries are in + // the response. + // + // We recommend not to set this value unless you are intentionally listing + // less than the complete directory contents. + // + // If unspecified, at most 1000 directory entries will be returned. The + // maximum value is 1000. Values above 1000 will be coerced to 1000. + PageSize int64 `json:"-" url:"page_size,omitempty"` + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the contents of this directory. Provide this + // token to retrieve the next page of directory entries. When providing a + // `page_token`, all other parameters provided to the request must match the + // previous request. To list all of the entries in a directory, it is + // necessary to continue requesting pages of entries until the response + // contains no `next_page_token`. Note that the number of entries returned + // must not be used to determine when the listing is complete. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListDirectoryContentsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListDirectoryContentsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListDirectoryResponse struct { + // Array of DirectoryEntry. + Contents []DirectoryEntry `json:"contents,omitempty"` + // A token, which can be sent as `page_token` to retrieve the next page. 
+ NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListDirectoryResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListDirectoryResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListStatusResponse struct { + // A list of FileInfo's that describe contents of directory or file. See + // example above. + Files []FileInfo `json:"files,omitempty"` +} + +type MkDirs struct { + // The path of the new directory. The path should be the absolute DBFS path. + Path string `json:"path"` +} + +type MkDirsResponse struct { +} + +type Move struct { + // The destination path of the file or directory. The path should be the + // absolute DBFS path. + DestinationPath string `json:"destination_path"` + // The source path of the file or directory. The path should be the absolute + // DBFS path. + SourcePath string `json:"source_path"` +} + +type MoveResponse struct { +} + +type Put struct { + // This parameter might be absent, and instead a posted file will be used. + Contents string `json:"contents,omitempty"` + // The flag that specifies whether to overwrite existing file/files. + Overwrite bool `json:"overwrite,omitempty"` + // The path of the new file. The path should be the absolute DBFS path. + Path string `json:"path"` + + ForceSendFields []string `json:"-"` +} + +func (s *Put) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Put) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PutResponse struct { +} + +// Get the contents of a file +type ReadDbfsRequest struct { + // The number of bytes to read starting from the offset. This has a limit of + // 1 MB, and a default value of 0.5 MB. + Length int64 `json:"-" url:"length,omitempty"` + // The offset to read from in bytes. + Offset int64 `json:"-" url:"offset,omitempty"` + // The path of the file to read. The path should be the absolute DBFS path. + Path string `json:"-" url:"path"` + + ForceSendFields []string `json:"-"` +} + +func (s *ReadDbfsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ReadDbfsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ReadResponse struct { + // The number of bytes read (could be less than ``length`` if we hit end of + // file). This refers to number of bytes read in unencoded version (response + // data is base64-encoded). + BytesRead int64 `json:"bytes_read,omitempty"` + // The base64-encoded contents of the file read. + Data string `json:"data,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ReadResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ReadResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Upload a file +type UploadRequest struct { + Contents io.ReadCloser `json:"-"` + // The absolute path of the file. + FilePath string `json:"-" url:"-"` + // If true, an existing file will be overwritten. 
+ Overwrite bool `json:"-" url:"overwrite,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UploadRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UploadRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UploadResponse struct { +} diff --git a/iam/v2preview/api.go b/iam/v2preview/api.go new file mode 100755 index 000000000..936bfefa1 --- /dev/null +++ b/iam/v2preview/api.go @@ -0,0 +1,1392 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Access Control Preview, Account Access Control Preview, Account Access Control Proxy Preview, Account Groups Preview, Account Service Principals Preview, Account Users Preview, Current User Preview, Groups Preview, Permission Migration Preview, Permissions Preview, Service Principals Preview, Users Preview, Workspace Assignment Preview, etc. +package iampreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type AccessControlPreviewInterface interface { + + // Check access policy to a resource. + CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) +} + +func NewAccessControlPreview(client *client.DatabricksClient) *AccessControlPreviewAPI { + return &AccessControlPreviewAPI{ + accessControlPreviewImpl: accessControlPreviewImpl{ + client: client, + }, + } +} + +// Rule based Access Control for Databricks Resources. +type AccessControlPreviewAPI struct { + accessControlPreviewImpl +} + +type AccountAccessControlPreviewInterface interface { + + // Get assignable roles for a resource. + // + // Gets all the roles that can be granted on an account level resource. A role + // is grantable if the rule set on the resource can contain an access rule of + // the role. + GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) + + // Get a rule set. + // + // Get a rule set by its name. A rule set is always attached to a resource and + // contains a list of access rules on the said resource. Currently only a + // default rule set for each resource is supported. + GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) + + // Update a rule set. + // + // Replace the rules of a rule set. First, use get to read the current version + // of the rule set before modifying it. This pattern helps prevent conflicts + // between concurrent updates. + UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) +} + +func NewAccountAccessControlPreview(client *client.DatabricksClient) *AccountAccessControlPreviewAPI { + return &AccountAccessControlPreviewAPI{ + accountAccessControlPreviewImpl: accountAccessControlPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage access rules on resources in an account. Currently, only +// grant rules are supported. A grant rule specifies a role assigned to a set of +// principals. A list of rules attached to a resource is called a rule set. +type AccountAccessControlPreviewAPI struct { + accountAccessControlPreviewImpl +} + +type AccountAccessControlProxyPreviewInterface interface { + + // Get assignable roles for a resource. + // + // Gets all the roles that can be granted on an account-level resource. 
A role + // is grantable if the rule set on the resource can contain an access rule of + // the role. + GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) + + // Get a rule set. + // + // Get a rule set by its name. A rule set is always attached to a resource and + // contains a list of access rules on the said resource. Currently only a + // default rule set for each resource is supported. + GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) + + // Update a rule set. + // + // Replace the rules of a rule set. First, use a GET rule set request to read + // the current version of the rule set before modifying it. This pattern helps + // prevent conflicts between concurrent updates. + UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) +} + +func NewAccountAccessControlProxyPreview(client *client.DatabricksClient) *AccountAccessControlProxyPreviewAPI { + return &AccountAccessControlProxyPreviewAPI{ + accountAccessControlProxyPreviewImpl: accountAccessControlProxyPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage access rules on resources in an account. Currently, only +// grant rules are supported. A grant rule specifies a role assigned to a set of +// principals. A list of rules attached to a resource is called a rule set. A +// workspace must belong to an account for these APIs to work. +type AccountAccessControlProxyPreviewAPI struct { + accountAccessControlProxyPreviewImpl +} + +type AccountGroupsPreviewInterface interface { + + // Create a new group. + // + // Creates a group in the Databricks account with a unique name, using the + // supplied group details. + Create(ctx context.Context, request Group) (*Group, error) + + // Delete a group. + // + // Deletes a group from the Databricks account. + Delete(ctx context.Context, request DeleteAccountGroupRequest) error + + // Delete a group. + // + // Deletes a group from the Databricks account. + DeleteById(ctx context.Context, id string) error + + // Get group details. + // + // Gets the information for a specific group in the Databricks account. + Get(ctx context.Context, request GetAccountGroupRequest) (*Group, error) + + // Get group details. + // + // Gets the information for a specific group in the Databricks account. + GetById(ctx context.Context, id string) (*Group, error) + + // List group details. + // + // Gets all details of the groups associated with the Databricks account. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] + + // List group details. + // + // Gets all details of the groups associated with the Databricks account. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) + + // GroupDisplayNameToIdMap calls [AccountGroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. + // + // Returns an error if there's more than one [Group] with the same .DisplayName. + // + // Note: All [Group] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
+ GroupDisplayNameToIdMap(ctx context.Context, request ListAccountGroupsRequest) (map[string]string, error) + + // GetByDisplayName calls [AccountGroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. + // + // Returns an error if there's more than one [Group] with the same .DisplayName. + // + // Note: All [Group] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*Group, error) + + // Update group details. + // + // Partially updates the details of a group. + Patch(ctx context.Context, request PartialUpdate) error + + // Replace a group. + // + // Updates the details of a group by replacing the entire group entity. + Update(ctx context.Context, request Group) error +} + +func NewAccountGroupsPreview(client *client.DatabricksClient) *AccountGroupsPreviewAPI { + return &AccountGroupsPreviewAPI{ + accountGroupsPreviewImpl: accountGroupsPreviewImpl{ + client: client, + }, + } +} + +// Groups simplify identity management, making it easier to assign access to +// Databricks account, data, and other securable objects. +// +// It is best practice to assign access to workspaces and access-control +// policies in Unity Catalog to groups, instead of to users individually. All +// Databricks account identities can be assigned as members of groups, and +// members inherit permissions that are assigned to their group. +type AccountGroupsPreviewAPI struct { + accountGroupsPreviewImpl +} + +// Delete a group. +// +// Deletes a group from the Databricks account. +func (a *AccountGroupsPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.accountGroupsPreviewImpl.Delete(ctx, DeleteAccountGroupRequest{ + Id: id, + }) +} + +// Get group details. +// +// Gets the information for a specific group in the Databricks account. +func (a *AccountGroupsPreviewAPI) GetById(ctx context.Context, id string) (*Group, error) { + return a.accountGroupsPreviewImpl.Get(ctx, GetAccountGroupRequest{ + Id: id, + }) +} + +// GroupDisplayNameToIdMap calls [AccountGroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. +// +// Returns an error if there's more than one [Group] with the same .DisplayName. +// +// Note: All [Group] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AccountGroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListAccountGroupsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [AccountGroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. +// +// Returns an error if there's more than one [Group] with the same .DisplayName. +// +// Note: All [Group] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
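Editorial aside, not part of the generated diff: a minimal usage sketch of the name-based group helpers. It assumes the module layout of this PR (package iampreview under iam/v2preview, imported from the main SDK module path) and an account-level configuration resolvable from the environment; the group name "data-engineers" is a placeholder. Passing nil to the constructor makes it build and resolve a config itself, and GetByDisplayName lists every group in the account into memory before matching, so prefer GetById when the ID is already known.

package main

import (
	"context"
	"fmt"

	iampreview "github.com/databricks/databricks-sdk-go/iam/v2preview"
)

func main() {
	ctx := context.Background()
	// nil config: the constructor allocates one and resolves it from the
	// environment (host, account ID, credentials).
	groups, err := iampreview.NewAccountGroupsPreviewClient(nil)
	if err != nil {
		panic(err)
	}
	// Returns an error if zero groups, or more than one group, carry this
	// display name; "data-engineers" is a hypothetical example.
	g, err := groups.GetByDisplayName(ctx, "data-engineers")
	if err != nil {
		panic(err)
	}
	fmt.Println(g.Id)
}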
+func (a *AccountGroupsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
+	result, err := a.ListAll(ctx, ListAccountGroupsRequest{})
+	if err != nil {
+		return nil, err
+	}
+	tmp := map[string][]Group{}
+	for _, v := range result {
+		key := v.DisplayName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return nil, fmt.Errorf("Group named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return nil, fmt.Errorf("there are %d instances of Group named '%s'", len(alternatives), name)
+	}
+	return &alternatives[0], nil
+}
+
+type AccountServicePrincipalsPreviewInterface interface {
+
+	// Create a service principal.
+	//
+	// Creates a new service principal in the Databricks account.
+	Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error)
+
+	// Delete a service principal.
+	//
+	// Delete a single service principal in the Databricks account.
+	Delete(ctx context.Context, request DeleteAccountServicePrincipalRequest) error
+
+	// Delete a service principal.
+	//
+	// Delete a single service principal in the Databricks account.
+	DeleteById(ctx context.Context, id string) error
+
+	// Get service principal details.
+	//
+	// Gets the details for a single service principal defined in the Databricks
+	// account.
+	Get(ctx context.Context, request GetAccountServicePrincipalRequest) (*ServicePrincipal, error)
+
+	// Get service principal details.
+	//
+	// Gets the details for a single service principal defined in the Databricks
+	// account.
+	GetById(ctx context.Context, id string) (*ServicePrincipal, error)
+
+	// List service principals.
+	//
+	// Gets the set of service principals associated with a Databricks account.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal]
+
+	// List service principals.
+	//
+	// Gets the set of service principals associated with a Databricks account.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error)
+
+	// ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value.
+	//
+	// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+	//
+	// Note: All [ServicePrincipal] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListAccountServicePrincipalsRequest) (map[string]string, error)
+
+	// GetByDisplayName calls [AccountServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal].
+	//
+	// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+	//
+	// Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error)
+
+	// Update service principal details.
+	//
+	// Partially updates the details of a single service principal in the Databricks
+	// account.
+	Patch(ctx context.Context, request PartialUpdate) error
+
+	// Replace service principal.
+	//
+	// Updates the details of a single service principal.
+	//
+	// This action replaces the existing service principal with the same name.
+	Update(ctx context.Context, request ServicePrincipal) error
+}
+
+func NewAccountServicePrincipalsPreview(client *client.DatabricksClient) *AccountServicePrincipalsPreviewAPI {
+	return &AccountServicePrincipalsPreviewAPI{
+		accountServicePrincipalsPreviewImpl: accountServicePrincipalsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Identities for use with jobs, automated tools, and systems such as scripts,
+// apps, and CI/CD platforms. Databricks recommends creating service principals
+// to run production jobs or modify production data. If all processes that act
+// on production data run with service principals, interactive users do not need
+// any write, delete, or modify privileges in production. This eliminates the
+// risk of a user overwriting production data by accident.
+type AccountServicePrincipalsPreviewAPI struct {
+	accountServicePrincipalsPreviewImpl
+}
+
+// Delete a service principal.
+//
+// Delete a single service principal in the Databricks account.
+func (a *AccountServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.accountServicePrincipalsPreviewImpl.Delete(ctx, DeleteAccountServicePrincipalRequest{
+		Id: id,
+	})
+}
+
+// Get service principal details.
+//
+// Gets the details for a single service principal defined in the Databricks
+// account.
+func (a *AccountServicePrincipalsPreviewAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) {
+	return a.accountServicePrincipalsPreviewImpl.Get(ctx, GetAccountServicePrincipalRequest{
+		Id: id,
+	})
+}
+
+// ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value.
+//
+// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+//
+// Note: All [ServicePrincipal] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *AccountServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListAccountServicePrincipalsRequest) (map[string]string, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
+	mapping := map[string]string{}
+	result, err := a.ListAll(ctx, request)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range result {
+		key := v.DisplayName
+		_, duplicate := mapping[key]
+		if duplicate {
+			return nil, fmt.Errorf("duplicate .DisplayName: %s", key)
+		}
+		mapping[key] = v.Id
+	}
+	return mapping, nil
+}
+
+// GetByDisplayName calls [AccountServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal].
+//
+// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+//
+// Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name.
+//
+// This method is generated by Databricks SDK Code Generator.
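Editorial aside, not part of the generated diff: a short sketch of the duplicate-safe name-to-ID map helper documented above, under the same assumptions as the earlier sketch (package iampreview imported from this PR's iam/v2preview directory, account config resolved from the environment). An empty request fetches every service principal in the account and holds the full set in memory, and the helper fails if two service principals share a display name.

package main

import (
	"context"
	"fmt"

	iampreview "github.com/databricks/databricks-sdk-go/iam/v2preview"
)

func main() {
	ctx := context.Background()
	sps, err := iampreview.NewAccountServicePrincipalsPreviewClient(nil)
	if err != nil {
		panic(err)
	}
	// Builds displayName -> id; errors out on the first duplicate display name.
	byName, err := sps.ServicePrincipalDisplayNameToIdMap(ctx, iampreview.ListAccountServicePrincipalsRequest{})
	if err != nil {
		panic(err)
	}
	for name, id := range byName {
		fmt.Printf("%s => %s\n", name, id)
	}
}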
+func (a *AccountServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
+	result, err := a.ListAll(ctx, ListAccountServicePrincipalsRequest{})
+	if err != nil {
+		return nil, err
+	}
+	tmp := map[string][]ServicePrincipal{}
+	for _, v := range result {
+		key := v.DisplayName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return nil, fmt.Errorf("ServicePrincipal named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return nil, fmt.Errorf("there are %d instances of ServicePrincipal named '%s'", len(alternatives), name)
+	}
+	return &alternatives[0], nil
+}
+
+type AccountUsersPreviewInterface interface {
+
+	// Create a new user.
+	//
+	// Creates a new user in the Databricks account. This new user will also be
+	// added to the Databricks account.
+	Create(ctx context.Context, request User) (*User, error)
+
+	// Delete a user.
+	//
+	// Deletes a user. Deleting a user from a Databricks account also removes
+	// objects associated with the user.
+	Delete(ctx context.Context, request DeleteAccountUserRequest) error
+
+	// Delete a user.
+	//
+	// Deletes a user. Deleting a user from a Databricks account also removes
+	// objects associated with the user.
+	DeleteById(ctx context.Context, id string) error
+
+	// Get user details.
+	//
+	// Gets information for a specific user in the Databricks account.
+	Get(ctx context.Context, request GetAccountUserRequest) (*User, error)
+
+	// Get user details.
+	//
+	// Gets information for a specific user in the Databricks account.
+	GetById(ctx context.Context, id string) (*User, error)
+
+	// List users.
+	//
+	// Gets details for all the users associated with a Databricks account.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User]
+
+	// List users.
+	//
+	// Gets details for all the users associated with a Databricks account.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error)
+
+	// UserUserNameToIdMap calls [AccountUsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value.
+	//
+	// Returns an error if there's more than one [User] with the same .UserName.
+	//
+	// Note: All [User] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	UserUserNameToIdMap(ctx context.Context, request ListAccountUsersRequest) (map[string]string, error)
+
+	// GetByUserName calls [AccountUsersPreviewAPI.UserUserNameToIdMap] and returns a single [User].
+	//
+	// Returns an error if there's more than one [User] with the same .UserName.
+	//
+	// Note: All [User] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByUserName(ctx context.Context, name string) (*User, error)
+
+	// Update user details.
+	//
+	// Partially updates a user resource by applying the supplied operations on
+	// specific user attributes.
+	Patch(ctx context.Context, request PartialUpdate) error
+
+	// Replace a user.
+	//
+	// Replaces a user's information with the data supplied in the request.
+	Update(ctx context.Context, request User) error
+}
+
+func NewAccountUsersPreview(client *client.DatabricksClient) *AccountUsersPreviewAPI {
+	return &AccountUsersPreviewAPI{
+		accountUsersPreviewImpl: accountUsersPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// User identities recognized by Databricks and represented by email addresses.
+//
+// Databricks recommends using SCIM provisioning to sync users and groups
+// automatically from your identity provider to your Databricks account. SCIM
+// streamlines onboarding a new employee or team by using your identity provider
+// to create users and groups in the Databricks account and give them the proper
+// level of access. When a user leaves your organization or no longer needs
+// access to the Databricks account, admins can terminate the user in your
+// identity provider and that user’s account will also be removed from the
+// Databricks account. This ensures a consistent offboarding process and
+// prevents unauthorized users from accessing sensitive data.
+type AccountUsersPreviewAPI struct {
+	accountUsersPreviewImpl
+}
+
+// Delete a user.
+//
+// Deletes a user. Deleting a user from a Databricks account also removes
+// objects associated with the user.
+func (a *AccountUsersPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.accountUsersPreviewImpl.Delete(ctx, DeleteAccountUserRequest{
+		Id: id,
+	})
+}
+
+// Get user details.
+//
+// Gets information for a specific user in the Databricks account.
+func (a *AccountUsersPreviewAPI) GetById(ctx context.Context, id string) (*User, error) {
+	return a.accountUsersPreviewImpl.Get(ctx, GetAccountUserRequest{
+		Id: id,
+	})
+}
+
+// UserUserNameToIdMap calls [AccountUsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value.
+//
+// Returns an error if there's more than one [User] with the same .UserName.
+//
+// Note: All [User] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *AccountUsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, request ListAccountUsersRequest) (map[string]string, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
+	mapping := map[string]string{}
+	result, err := a.ListAll(ctx, request)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range result {
+		key := v.UserName
+		_, duplicate := mapping[key]
+		if duplicate {
+			return nil, fmt.Errorf("duplicate .UserName: %s", key)
+		}
+		mapping[key] = v.Id
+	}
+	return mapping, nil
+}
+
+// GetByUserName calls [AccountUsersPreviewAPI.UserUserNameToIdMap] and returns a single [User].
+//
+// Returns an error if there's more than one [User] with the same .UserName.
+//
+// Note: All [User] instances are loaded into memory before returning matching by name.
+//
+// This method is generated by Databricks SDK Code Generator.
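Editorial aside, not part of the generated diff: a sketch contrasting the streaming List iterator with ListAll, which buffers the full result set. It keeps the same assumptions as the sketches above, plus the HasNext/Next contract of the main SDK's listing.Iterator; the iterator fetches pages lazily, so it is the safer choice for large accounts.

package main

import (
	"context"
	"fmt"

	iampreview "github.com/databricks/databricks-sdk-go/iam/v2preview"
)

func main() {
	ctx := context.Background()
	users, err := iampreview.NewAccountUsersPreviewClient(nil)
	if err != nil {
		panic(err)
	}
	// List returns a lazy iterator over pages of users; ListAll would load
	// every user into memory before returning.
	it := users.List(ctx, iampreview.ListAccountUsersRequest{})
	for it.HasNext(ctx) {
		u, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(u.UserName)
	}
}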
+func (a *AccountUsersPreviewAPI) GetByUserName(ctx context.Context, name string) (*User, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
+	result, err := a.ListAll(ctx, ListAccountUsersRequest{})
+	if err != nil {
+		return nil, err
+	}
+	tmp := map[string][]User{}
+	for _, v := range result {
+		key := v.UserName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return nil, fmt.Errorf("User named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return nil, fmt.Errorf("there are %d instances of User named '%s'", len(alternatives), name)
+	}
+	return &alternatives[0], nil
+}
+
+type CurrentUserPreviewInterface interface {
+
+	// Get current user info.
+	//
+	// Get details about the current method caller's identity.
+	Me(ctx context.Context) (*User, error)
+}
+
+func NewCurrentUserPreview(client *client.DatabricksClient) *CurrentUserPreviewAPI {
+	return &CurrentUserPreviewAPI{
+		currentUserPreviewImpl: currentUserPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// This API allows retrieving information about the currently authenticated user
+// or service principal.
+type CurrentUserPreviewAPI struct {
+	currentUserPreviewImpl
+}
+
+type GroupsPreviewInterface interface {
+
+	// Create a new group.
+	//
+	// Creates a group in the Databricks workspace with a unique name, using the
+	// supplied group details.
+	Create(ctx context.Context, request Group) (*Group, error)
+
+	// Delete a group.
+	//
+	// Deletes a group from the Databricks workspace.
+	Delete(ctx context.Context, request DeleteGroupRequest) error
+
+	// Delete a group.
+	//
+	// Deletes a group from the Databricks workspace.
+	DeleteById(ctx context.Context, id string) error
+
+	// Get group details.
+	//
+	// Gets the information for a specific group in the Databricks workspace.
+	Get(ctx context.Context, request GetGroupRequest) (*Group, error)
+
+	// Get group details.
+	//
+	// Gets the information for a specific group in the Databricks workspace.
+	GetById(ctx context.Context, id string) (*Group, error)
+
+	// List group details.
+	//
+	// Gets all details of the groups associated with the Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group]
+
+	// List group details.
+	//
+	// Gets all details of the groups associated with the Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error)
+
+	// GroupDisplayNameToIdMap calls [GroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value.
+	//
+	// Returns an error if there's more than one [Group] with the same .DisplayName.
+	//
+	// Note: All [Group] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GroupDisplayNameToIdMap(ctx context.Context, request ListGroupsRequest) (map[string]string, error)
+
+	// GetByDisplayName calls [GroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group].
+	//
+	// Returns an error if there's more than one [Group] with the same .DisplayName.
+	//
+	// Note: All [Group] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByDisplayName(ctx context.Context, name string) (*Group, error)
+
+	// Update group details.
+ // + // Partially updates the details of a group. + Patch(ctx context.Context, request PartialUpdate) error + + // Replace a group. + // + // Updates the details of a group by replacing the entire group entity. + Update(ctx context.Context, request Group) error +} + +func NewGroupsPreview(client *client.DatabricksClient) *GroupsPreviewAPI { + return &GroupsPreviewAPI{ + groupsPreviewImpl: groupsPreviewImpl{ + client: client, + }, + } +} + +// Groups simplify identity management, making it easier to assign access to +// Databricks workspace, data, and other securable objects. +// +// It is best practice to assign access to workspaces and access-control +// policies in Unity Catalog to groups, instead of to users individually. All +// Databricks workspace identities can be assigned as members of groups, and +// members inherit permissions that are assigned to their group. +type GroupsPreviewAPI struct { + groupsPreviewImpl +} + +// Delete a group. +// +// Deletes a group from the Databricks workspace. +func (a *GroupsPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.groupsPreviewImpl.Delete(ctx, DeleteGroupRequest{ + Id: id, + }) +} + +// Get group details. +// +// Gets the information for a specific group in the Databricks workspace. +func (a *GroupsPreviewAPI) GetById(ctx context.Context, id string) (*Group, error) { + return a.groupsPreviewImpl.Get(ctx, GetGroupRequest{ + Id: id, + }) +} + +// GroupDisplayNameToIdMap calls [GroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. +// +// Returns an error if there's more than one [Group] with the same .DisplayName. +// +// Note: All [Group] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *GroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListGroupsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [GroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. +// +// Returns an error if there's more than one [Group] with the same .DisplayName. +// +// Note: All [Group] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *GroupsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListGroupsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Group{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Group named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Group named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type PermissionMigrationPreviewInterface interface { + + // Migrate Permissions. 
+	MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error)
+}
+
+func NewPermissionMigrationPreview(client *client.DatabricksClient) *PermissionMigrationPreviewAPI {
+	return &PermissionMigrationPreviewAPI{
+		permissionMigrationPreviewImpl: permissionMigrationPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// APIs for migrating ACL permissions, used only by the ucx tool:
+// https://github.com/databrickslabs/ucx
+type PermissionMigrationPreviewAPI struct {
+	permissionMigrationPreviewImpl
+}
+
+type PermissionsPreviewInterface interface {
+
+	// Get object permissions.
+	//
+	// Gets the permissions of an object. Objects can inherit permissions from their
+	// parent objects or root object.
+	Get(ctx context.Context, request GetPermissionRequest) (*ObjectPermissions, error)
+
+	// Get object permissions.
+	//
+	// Gets the permissions of an object. Objects can inherit permissions from their
+	// parent objects or root object.
+	GetByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*ObjectPermissions, error)
+
+	// Get object permission levels.
+	//
+	// Gets the permission levels that a user can have on an object.
+	GetPermissionLevels(ctx context.Context, request GetPermissionLevelsRequest) (*GetPermissionLevelsResponse, error)
+
+	// Get object permission levels.
+	//
+	// Gets the permission levels that a user can have on an object.
+	GetPermissionLevelsByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*GetPermissionLevelsResponse, error)
+
+	// Set object permissions.
+	//
+	// Sets permissions on an object, replacing existing permissions if they exist.
+	// Deletes all direct permissions if none are specified. Objects can inherit
+	// permissions from their parent objects or root object.
+	Set(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error)
+
+	// Update object permissions.
+	//
+	// Updates the permissions on an object. Objects can inherit permissions from
+	// their parent objects or root object.
+	Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error)
+}
+
+func NewPermissionsPreview(client *client.DatabricksClient) *PermissionsPreviewAPI {
+	return &PermissionsPreviewAPI{
+		permissionsPreviewImpl: permissionsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The Permissions API is used to create, read, write, edit, update, and manage
+// access for various users on different objects and endpoints.
+//
+// * **[Apps permissions](:service:apps)** — Manage which users can manage or
+// use apps.
+//
+// * **[Cluster permissions](:service:clusters)** — Manage which users can
+// manage, restart, or attach to clusters.
+//
+// * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which
+// users can use cluster policies.
+//
+// * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage
+// which users can view, manage, run, cancel, or own a Delta Live Tables
+// pipeline.
+//
+// * **[Job permissions](:service:jobs)** — Manage which users can view,
+// manage, trigger, cancel, or own a job.
+//
+// * **[MLflow experiment permissions](:service:experiments)** — Manage which
+// users can read, edit, or manage MLflow experiments.
+//
+// * **[MLflow registered model permissions](:service:modelregistry)** —
+// Manage which users can read, edit, or manage MLflow registered models.
+//
+// * **[Password permissions](:service:users)** — Manage which users can use
+// password login when SSO is enabled.
+//
+// * **[Instance Pool permissions](:service:instancepools)** — Manage which
+// users can manage or attach to pools.
+//
+// * **[Repo permissions](repos)** — Manage which users can read, run, edit,
+// or manage a repo.
+//
+// * **[Serving endpoint permissions](:service:servingendpoints)** — Manage
+// which users can view, query, or manage a serving endpoint.
+//
+// * **[SQL warehouse permissions](:service:warehouses)** — Manage which users
+// can use or manage SQL warehouses.
+//
+// * **[Token permissions](:service:tokenmanagement)** — Manage which users
+// can create or use tokens.
+//
+// * **[Workspace object permissions](:service:workspace)** — Manage which
+// users can read, run, edit, or manage alerts, dbsql-dashboards, directories,
+// files, notebooks and queries.
+//
+// For the mapping of the required permissions for specific actions or abilities
+// and other important information, see [Access Control].
+//
+// Note that to manage access control on service principals, use **[Account
+// Access Control Proxy](:service:accountaccesscontrolproxy)**.
+//
+// [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html
+type PermissionsPreviewAPI struct {
+	permissionsPreviewImpl
+}
+
+// Get object permissions.
+//
+// Gets the permissions of an object. Objects can inherit permissions from their
+// parent objects or root object.
+func (a *PermissionsPreviewAPI) GetByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*ObjectPermissions, error) {
+	return a.permissionsPreviewImpl.Get(ctx, GetPermissionRequest{
+		RequestObjectType: requestObjectType,
+		RequestObjectId:   requestObjectId,
+	})
+}
+
+// Get object permission levels.
+//
+// Gets the permission levels that a user can have on an object.
+func (a *PermissionsPreviewAPI) GetPermissionLevelsByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*GetPermissionLevelsResponse, error) {
+	return a.permissionsPreviewImpl.GetPermissionLevels(ctx, GetPermissionLevelsRequest{
+		RequestObjectType: requestObjectType,
+		RequestObjectId:   requestObjectId,
+	})
+}
+
+type ServicePrincipalsPreviewInterface interface {
+
+	// Create a service principal.
+	//
+	// Creates a new service principal in the Databricks workspace.
+	Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error)
+
+	// Delete a service principal.
+	//
+	// Delete a single service principal in the Databricks workspace.
+	Delete(ctx context.Context, request DeleteServicePrincipalRequest) error
+
+	// Delete a service principal.
+	//
+	// Delete a single service principal in the Databricks workspace.
+	DeleteById(ctx context.Context, id string) error
+
+	// Get service principal details.
+	//
+	// Gets the details for a single service principal defined in the Databricks
+	// workspace.
+	Get(ctx context.Context, request GetServicePrincipalRequest) (*ServicePrincipal, error)
+
+	// Get service principal details.
+	//
+	// Gets the details for a single service principal defined in the Databricks
+	// workspace.
+	GetById(ctx context.Context, id string) (*ServicePrincipal, error)
+
+	// List service principals.
+	//
+	// Gets the set of service principals associated with a Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal]
+
+	// List service principals.
+	//
+	// Gets the set of service principals associated with a Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error)
+
+	// ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value.
+	//
+	// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+	//
+	// Note: All [ServicePrincipal] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListServicePrincipalsRequest) (map[string]string, error)
+
+	// GetByDisplayName calls [ServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal].
+	//
+	// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+	//
+	// Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error)
+
+	// Update service principal details.
+	//
+	// Partially updates the details of a single service principal in the Databricks
+	// workspace.
+	Patch(ctx context.Context, request PartialUpdate) error
+
+	// Replace service principal.
+	//
+	// Updates the details of a single service principal.
+	//
+	// This action replaces the existing service principal with the same name.
+	Update(ctx context.Context, request ServicePrincipal) error
+}
+
+func NewServicePrincipalsPreview(client *client.DatabricksClient) *ServicePrincipalsPreviewAPI {
+	return &ServicePrincipalsPreviewAPI{
+		servicePrincipalsPreviewImpl: servicePrincipalsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Identities for use with jobs, automated tools, and systems such as scripts,
+// apps, and CI/CD platforms. Databricks recommends creating service principals
+// to run production jobs or modify production data. If all processes that act
+// on production data run with service principals, interactive users do not need
+// any write, delete, or modify privileges in production. This eliminates the
+// risk of a user overwriting production data by accident.
+type ServicePrincipalsPreviewAPI struct {
+	servicePrincipalsPreviewImpl
+}
+
+// Delete a service principal.
+//
+// Delete a single service principal in the Databricks workspace.
+func (a *ServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.servicePrincipalsPreviewImpl.Delete(ctx, DeleteServicePrincipalRequest{
+		Id: id,
+	})
+}
+
+// Get service principal details.
+//
+// Gets the details for a single service principal defined in the Databricks
+// workspace.
+func (a *ServicePrincipalsPreviewAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) {
+	return a.servicePrincipalsPreviewImpl.Get(ctx, GetServicePrincipalRequest{
+		Id: id,
+	})
+}
+
+// ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value.
+//
+// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+//
+// Note: All [ServicePrincipal] instances are loaded into memory before creating a map.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *ServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListServicePrincipalsRequest) (map[string]string, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
+	mapping := map[string]string{}
+	result, err := a.ListAll(ctx, request)
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range result {
+		key := v.DisplayName
+		_, duplicate := mapping[key]
+		if duplicate {
+			return nil, fmt.Errorf("duplicate .DisplayName: %s", key)
+		}
+		mapping[key] = v.Id
+	}
+	return mapping, nil
+}
+
+// GetByDisplayName calls [ServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal].
+//
+// Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName.
+//
+// Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name.
+//
+// This method is generated by Databricks SDK Code Generator.
+func (a *ServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
+	result, err := a.ListAll(ctx, ListServicePrincipalsRequest{})
+	if err != nil {
+		return nil, err
+	}
+	tmp := map[string][]ServicePrincipal{}
+	for _, v := range result {
+		key := v.DisplayName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return nil, fmt.Errorf("ServicePrincipal named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return nil, fmt.Errorf("there are %d instances of ServicePrincipal named '%s'", len(alternatives), name)
+	}
+	return &alternatives[0], nil
+}
+
+type UsersPreviewInterface interface {
+
+	// Create a new user.
+	//
+	// Creates a new user in the Databricks workspace. This new user will also be
+	// added to the Databricks account.
+	Create(ctx context.Context, request User) (*User, error)
+
+	// Delete a user.
+	//
+	// Deletes a user. Deleting a user from a Databricks workspace also removes
+	// objects associated with the user.
+	Delete(ctx context.Context, request DeleteUserRequest) error
+
+	// Delete a user.
+	//
+	// Deletes a user. Deleting a user from a Databricks workspace also removes
+	// objects associated with the user.
+	DeleteById(ctx context.Context, id string) error
+
+	// Get user details.
+	//
+	// Gets information for a specific user in the Databricks workspace.
+	Get(ctx context.Context, request GetUserRequest) (*User, error)
+
+	// Get user details.
+	//
+	// Gets information for a specific user in the Databricks workspace.
+	GetById(ctx context.Context, id string) (*User, error)
+
+	// Get password permission levels.
+	//
+	// Gets the permission levels that a user can have on an object.
+	GetPermissionLevels(ctx context.Context) (*GetPasswordPermissionLevelsResponse, error)
+
+	// Get password permissions.
+	//
+	// Gets the permissions of all passwords. Passwords can inherit permissions from
+	// their root object.
+	GetPermissions(ctx context.Context) (*PasswordPermissions, error)
+
+	// List users.
+	//
+	// Gets details for all the users associated with a Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListUsersRequest) listing.Iterator[User]
+
+	// List users.
+	//
+	// Gets details for all the users associated with a Databricks workspace.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListUsersRequest) ([]User, error)
+
+	// UserUserNameToIdMap calls [UsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value.
+	//
+	// Returns an error if there's more than one [User] with the same .UserName.
+	//
+	// Note: All [User] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	UserUserNameToIdMap(ctx context.Context, request ListUsersRequest) (map[string]string, error)
+
+	// GetByUserName calls [UsersPreviewAPI.UserUserNameToIdMap] and returns a single [User].
+	//
+	// Returns an error if there's more than one [User] with the same .UserName.
+	//
+	// Note: All [User] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByUserName(ctx context.Context, name string) (*User, error)
+
+	// Update user details.
+	//
+	// Partially updates a user resource by applying the supplied operations on
+	// specific user attributes.
+	Patch(ctx context.Context, request PartialUpdate) error
+
+	// Set password permissions.
+	//
+	// Sets permissions on an object, replacing existing permissions if they exist.
+	// Deletes all direct permissions if none are specified. Objects can inherit
+	// permissions from their root object.
+	SetPermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error)
+
+	// Replace a user.
+	//
+	// Replaces a user's information with the data supplied in the request.
+	Update(ctx context.Context, request User) error
+
+	// Update password permissions.
+	//
+	// Updates the permissions on all passwords. Passwords can inherit permissions
+	// from their root object.
+	UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error)
+}
+
+func NewUsersPreview(client *client.DatabricksClient) *UsersPreviewAPI {
+	return &UsersPreviewAPI{
+		usersPreviewImpl: usersPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// User identities recognized by Databricks and represented by email addresses.
+//
+// Databricks recommends using SCIM provisioning to sync users and groups
+// automatically from your identity provider to your Databricks workspace. SCIM
+// streamlines onboarding a new employee or team by using your identity provider
+// to create users and groups in the Databricks workspace and give them the
+// proper level of access. When a user leaves your organization or no longer
+// needs access to the Databricks workspace, admins can terminate the user in
+// your identity provider and that user’s account will also be removed from the
+// Databricks workspace. This ensures a consistent offboarding process and
+// prevents unauthorized users from accessing sensitive data.
+type UsersPreviewAPI struct {
+	usersPreviewImpl
+}
+
+// Delete a user.
+//
+// Deletes a user. Deleting a user from a Databricks workspace also removes
+// objects associated with the user.
+func (a *UsersPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.usersPreviewImpl.Delete(ctx, DeleteUserRequest{
+		Id: id,
+	})
+}
+
+// Get user details.
+//
+// Gets information for a specific user in the Databricks workspace.
+func (a *UsersPreviewAPI) GetById(ctx context.Context, id string) (*User, error) { + return a.usersPreviewImpl.Get(ctx, GetUserRequest{ + Id: id, + }) +} + +// UserUserNameToIdMap calls [UsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. +// +// Returns an error if there's more than one [User] with the same .UserName. +// +// Note: All [User] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *UsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, request ListUsersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.UserName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .UserName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByUserName calls [UsersPreviewAPI.UserUserNameToIdMap] and returns a single [User]. +// +// Returns an error if there's more than one [User] with the same .UserName. +// +// Note: All [User] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *UsersPreviewAPI) GetByUserName(ctx context.Context, name string) (*User, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListUsersRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]User{} + for _, v := range result { + key := v.UserName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("User named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of User named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type WorkspaceAssignmentPreviewInterface interface { + + // Delete permissions assignment. + // + // Deletes the workspace permissions assignment in a given account and workspace + // for the specified principal. + Delete(ctx context.Context, request DeleteWorkspaceAssignmentRequest) error + + // Delete permissions assignment. + // + // Deletes the workspace permissions assignment in a given account and workspace + // for the specified principal. + DeleteByWorkspaceIdAndPrincipalId(ctx context.Context, workspaceId int64, principalId int64) error + + // List workspace permissions. + // + // Get an array of workspace permissions for the specified account and + // workspace. + Get(ctx context.Context, request GetWorkspaceAssignmentRequest) (*WorkspacePermissions, error) + + // List workspace permissions. + // + // Get an array of workspace permissions for the specified account and + // workspace. + GetByWorkspaceId(ctx context.Context, workspaceId int64) (*WorkspacePermissions, error) + + // Get permission assignments. + // + // Get the permission assignments for the specified Databricks account and + // Databricks workspace. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] + + // Get permission assignments. + // + // Get the permission assignments for the specified Databricks account and + // Databricks workspace. 
+ // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) + + // Get permission assignments. + // + // Get the permission assignments for the specified Databricks account and + // Databricks workspace. + ListByWorkspaceId(ctx context.Context, workspaceId int64) (*PermissionAssignments, error) + + // Create or update permissions assignment. + // + // Creates or updates the workspace permissions assignment in a given account + // and workspace for the specified principal. + Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) +} + +func NewWorkspaceAssignmentPreview(client *client.DatabricksClient) *WorkspaceAssignmentPreviewAPI { + return &WorkspaceAssignmentPreviewAPI{ + workspaceAssignmentPreviewImpl: workspaceAssignmentPreviewImpl{ + client: client, + }, + } +} + +// The Workspace Permission Assignment API allows you to manage workspace +// permissions for principals in your account. +type WorkspaceAssignmentPreviewAPI struct { + workspaceAssignmentPreviewImpl +} + +// Delete permissions assignment. +// +// Deletes the workspace permissions assignment in a given account and workspace +// for the specified principal. +func (a *WorkspaceAssignmentPreviewAPI) DeleteByWorkspaceIdAndPrincipalId(ctx context.Context, workspaceId int64, principalId int64) error { + return a.workspaceAssignmentPreviewImpl.Delete(ctx, DeleteWorkspaceAssignmentRequest{ + WorkspaceId: workspaceId, + PrincipalId: principalId, + }) +} + +// List workspace permissions. +// +// Get an array of workspace permissions for the specified account and +// workspace. +func (a *WorkspaceAssignmentPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*WorkspacePermissions, error) { + return a.workspaceAssignmentPreviewImpl.Get(ctx, GetWorkspaceAssignmentRequest{ + WorkspaceId: workspaceId, + }) +} + +// Get permission assignments. +// +// Get the permission assignments for the specified Databricks account and +// Databricks workspace. +func (a *WorkspaceAssignmentPreviewAPI) ListByWorkspaceId(ctx context.Context, workspaceId int64) (*PermissionAssignments, error) { + return a.workspaceAssignmentPreviewImpl.internalList(ctx, ListWorkspaceAssignmentRequest{ + WorkspaceId: workspaceId, + }) +} diff --git a/iam/v2preview/client.go b/iam/v2preview/client.go new file mode 100755 index 000000000..e136a8a59 --- /dev/null +++ b/iam/v2preview/client.go @@ -0,0 +1,433 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package iampreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type AccessControlPreviewClient struct { + AccessControlPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAccessControlPreviewClient(cfg *config.Config) (*AccessControlPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AccessControlPreviewClient{ + Config: cfg, + apiClient: apiClient, + AccessControlPreviewInterface: NewAccessControlPreview(databricksClient), + }, nil +} + +type AccountAccessControlPreviewClient struct { + AccountAccessControlPreviewInterface + + Config *config.Config +} + +func NewAccountAccessControlPreviewClient(cfg *config.Config) (*AccountAccessControlPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountAccessControlPreviewClient{ + Config: cfg, + AccountAccessControlPreviewInterface: NewAccountAccessControlPreview(apiClient), + }, nil +} + +type AccountAccessControlProxyPreviewClient struct { + AccountAccessControlProxyPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAccountAccessControlProxyPreviewClient(cfg *config.Config) (*AccountAccessControlProxyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AccountAccessControlProxyPreviewClient{ + Config: cfg, + apiClient: apiClient, + AccountAccessControlProxyPreviewInterface: NewAccountAccessControlProxyPreview(databricksClient), + }, nil +} + +type AccountGroupsPreviewClient struct { + AccountGroupsPreviewInterface + + Config *config.Config +} + +func NewAccountGroupsPreviewClient(cfg *config.Config) (*AccountGroupsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountGroupsPreviewClient{ + Config: cfg, + AccountGroupsPreviewInterface: NewAccountGroupsPreview(apiClient), + }, nil 
+} + +type AccountServicePrincipalsPreviewClient struct { + AccountServicePrincipalsPreviewInterface + + Config *config.Config +} + +func NewAccountServicePrincipalsPreviewClient(cfg *config.Config) (*AccountServicePrincipalsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountServicePrincipalsPreviewClient{ + Config: cfg, + AccountServicePrincipalsPreviewInterface: NewAccountServicePrincipalsPreview(apiClient), + }, nil +} + +type AccountUsersPreviewClient struct { + AccountUsersPreviewInterface + + Config *config.Config +} + +func NewAccountUsersPreviewClient(cfg *config.Config) (*AccountUsersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountUsersPreviewClient{ + Config: cfg, + AccountUsersPreviewInterface: NewAccountUsersPreview(apiClient), + }, nil +} + +type CurrentUserPreviewClient struct { + CurrentUserPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCurrentUserPreviewClient(cfg *config.Config) (*CurrentUserPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CurrentUserPreviewClient{ + Config: cfg, + apiClient: apiClient, + CurrentUserPreviewInterface: NewCurrentUserPreview(databricksClient), + }, nil +} + +type GroupsPreviewClient struct { + GroupsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewGroupsPreviewClient(cfg *config.Config) (*GroupsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &GroupsPreviewClient{ + Config: cfg, + apiClient: apiClient, + GroupsPreviewInterface: NewGroupsPreview(databricksClient), + }, nil +} + +type PermissionMigrationPreviewClient struct { + PermissionMigrationPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPermissionMigrationPreviewClient(cfg *config.Config) (*PermissionMigrationPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + 
return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PermissionMigrationPreviewClient{ + Config: cfg, + apiClient: apiClient, + PermissionMigrationPreviewInterface: NewPermissionMigrationPreview(databricksClient), + }, nil +} + +type PermissionsPreviewClient struct { + PermissionsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPermissionsPreviewClient(cfg *config.Config) (*PermissionsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PermissionsPreviewClient{ + Config: cfg, + apiClient: apiClient, + PermissionsPreviewInterface: NewPermissionsPreview(databricksClient), + }, nil +} + +type ServicePrincipalsPreviewClient struct { + ServicePrincipalsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewServicePrincipalsPreviewClient(cfg *config.Config) (*ServicePrincipalsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ServicePrincipalsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ServicePrincipalsPreviewInterface: NewServicePrincipalsPreview(databricksClient), + }, nil +} + +type UsersPreviewClient struct { + UsersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewUsersPreviewClient(cfg *config.Config) (*UsersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &UsersPreviewClient{ + Config: cfg, + apiClient: apiClient, + UsersPreviewInterface: NewUsersPreview(databricksClient), + }, nil +} + +type WorkspaceAssignmentPreviewClient struct { + WorkspaceAssignmentPreviewInterface + + Config *config.Config +} + +func NewWorkspaceAssignmentPreviewClient(cfg *config.Config) (*WorkspaceAssignmentPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, 
errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &WorkspaceAssignmentPreviewClient{ + Config: cfg, + WorkspaceAssignmentPreviewInterface: NewWorkspaceAssignmentPreview(apiClient), + }, nil +} diff --git a/iam/v2preview/impl.go b/iam/v2preview/impl.go new file mode 100755 index 000000000..afda9e289 --- /dev/null +++ b/iam/v2preview/impl.go @@ -0,0 +1,962 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package iampreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just AccessControlPreview API methods +type accessControlPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accessControlPreviewImpl) CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) { + var checkPolicyResponse CheckPolicyResponse + path := "/api/2.0preview/access-control/check-policy-v2" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &checkPolicyResponse) + return &checkPolicyResponse, err +} + +// unexported type that holds implementations of just AccountAccessControlPreview API methods +type accountAccessControlPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountAccessControlPreviewImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { + var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse + path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/assignable-roles", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAssignableRolesForResourceResponse) + return &getAssignableRolesForResourceResponse, err +} + +func (a *accountAccessControlPreviewImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { + var ruleSetResponse RuleSetResponse + path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &ruleSetResponse) + return &ruleSetResponse, err +} + +func (a *accountAccessControlPreviewImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { + var ruleSetResponse RuleSetResponse + path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &ruleSetResponse) + return &ruleSetResponse, err +} + +// unexported type that holds 
implementations of just AccountAccessControlProxyPreview API methods +type accountAccessControlProxyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountAccessControlProxyPreviewImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { + var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse + path := "/api/2.0preview/preview/accounts/access-control/assignable-roles" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAssignableRolesForResourceResponse) + return &getAssignableRolesForResourceResponse, err +} + +func (a *accountAccessControlProxyPreviewImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { + var ruleSetResponse RuleSetResponse + path := "/api/2.0preview/preview/accounts/access-control/rule-sets" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &ruleSetResponse) + return &ruleSetResponse, err +} + +func (a *accountAccessControlProxyPreviewImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { + var ruleSetResponse RuleSetResponse + path := "/api/2.0preview/preview/accounts/access-control/rule-sets" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &ruleSetResponse) + return &ruleSetResponse, err +} + +// unexported type that holds implementations of just AccountGroupsPreview API methods +type accountGroupsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountGroupsPreviewImpl) Create(ctx context.Context, request Group) (*Group, error) { + var group Group + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &group) + return &group, err +} + +func (a *accountGroupsPreviewImpl) Delete(ctx context.Context, request DeleteAccountGroupRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountGroupsPreviewImpl) Get(ctx context.Context, request GetAccountGroupRequest) (*Group, error) { + var group Group + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &group) + return &group, err +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks account. 
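+// Example (editorial sketch, not generated code): draining the paginated
+// iterator returned by List. Iterator behavior follows this SDK's listing
+// package; the api variable and the filter value are placeholders.
+//
+//	it := api.List(ctx, ListAccountGroupsRequest{Filter: `displayName sw "eng"`})
+//	for it.HasNext(ctx) {
+//		g, err := it.Next(ctx)
+//		if err != nil {
+//			return err // a real caller should handle the error
+//		}
+//		fmt.Println(g.Id, g.DisplayName)
+//	}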
+func (a *accountGroupsPreviewImpl) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountGroupsRequest) (*ListGroupsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListGroupsResponse) []Group { + return resp.Resources + } + getNextReq := func(resp *ListGroupsResponse) *ListAccountGroupsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Group, string]( + iterator, + func(item Group) string { + return item.Id + }) + return dedupedIterator +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks account. +func (a *accountGroupsPreviewImpl) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) + +} +func (a *accountGroupsPreviewImpl) internalList(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { + var listGroupsResponse ListGroupsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listGroupsResponse) + return &listGroupsResponse, err +} + +func (a *accountGroupsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *accountGroupsPreviewImpl) Update(ctx context.Context, request Group) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just AccountServicePrincipalsPreview API methods +type accountServicePrincipalsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountServicePrincipalsPreviewImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { + var servicePrincipal ServicePrincipal + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, 
path, headers, queryParams, request, &servicePrincipal) + return &servicePrincipal, err +} + +func (a *accountServicePrincipalsPreviewImpl) Delete(ctx context.Context, request DeleteAccountServicePrincipalRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountServicePrincipalsPreviewImpl) Get(ctx context.Context, request GetAccountServicePrincipalRequest) (*ServicePrincipal, error) { + var servicePrincipal ServicePrincipal + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servicePrincipal) + return &servicePrincipal, err +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks account. +func (a *accountServicePrincipalsPreviewImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { + return resp.Resources + } + getNextReq := func(resp *ListServicePrincipalResponse) *ListAccountServicePrincipalsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( + iterator, + func(item ServicePrincipal) string { + return item.Id + }) + return dedupedIterator +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks account. 
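+// Note and example (editorial sketch): ListAll drains the iterator with
+// listing.ToSliceN, so a positive request.Count caps the total number of
+// items returned, not just the per-page size. Names are placeholders.
+//
+//	sps, err := api.ListAll(ctx, ListAccountServicePrincipalsRequest{Count: 50})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(sps)) // at most 50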
+func (a *accountServicePrincipalsPreviewImpl) ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) + +} +func (a *accountServicePrincipalsPreviewImpl) internalList(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + var listServicePrincipalResponse ListServicePrincipalResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalResponse) + return &listServicePrincipalResponse, err +} + +func (a *accountServicePrincipalsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *accountServicePrincipalsPreviewImpl) Update(ctx context.Context, request ServicePrincipal) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just AccountUsersPreview API methods +type accountUsersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountUsersPreviewImpl) Create(ctx context.Context, request User) (*User, error) { + var user User + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &user) + return &user, err +} + +func (a *accountUsersPreviewImpl) Delete(ctx context.Context, request DeleteAccountUserRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountUsersPreviewImpl) Get(ctx context.Context, request GetAccountUserRequest) (*User, error) { + var user User + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &user) + return &user, err +} + +// List 
users. +// +// Gets details for all the users associated with a Databricks account. +func (a *accountUsersPreviewImpl) List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountUsersRequest) (*ListUsersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListUsersResponse) []User { + return resp.Resources + } + getNextReq := func(resp *ListUsersResponse) *ListAccountUsersRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[User, string]( + iterator, + func(item User) string { + return item.Id + }) + return dedupedIterator +} + +// List users. +// +// Gets details for all the users associated with a Databricks account. +func (a *accountUsersPreviewImpl) ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[User, int64](ctx, iterator, request.Count) + +} +func (a *accountUsersPreviewImpl) internalList(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { + var listUsersResponse ListUsersResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUsersResponse) + return &listUsersResponse, err +} + +func (a *accountUsersPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *accountUsersPreviewImpl) Update(ctx context.Context, request User) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just CurrentUserPreview API methods +type currentUserPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *currentUserPreviewImpl) Me(ctx context.Context) (*User, error) { + var user User + path := "/api/2.0preview/preview/scim/v2/Me" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &user) + return &user, err +} + +// unexported type that holds implementations of just GroupsPreview API methods +type groupsPreviewImpl struct { + client *client.DatabricksClient +} + 
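+// Example (editorial sketch): the workspace-level entry point for the methods
+// below is GroupsPreviewClient from client.go in this package. The profile
+// name and ctx are placeholders; error handling is abbreviated.
+//
+//	w, err := NewGroupsPreviewClient(&config.Config{Profile: "my-workspace"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	g, err := w.Create(ctx, Group{DisplayName: "data-eng"})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(g.Id)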
+func (a *groupsPreviewImpl) Create(ctx context.Context, request Group) (*Group, error) { + var group Group + path := "/api/2.0preview/preview/scim/v2/Groups" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &group) + return &group, err +} + +func (a *groupsPreviewImpl) Delete(ctx context.Context, request DeleteGroupRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *groupsPreviewImpl) Get(ctx context.Context, request GetGroupRequest) (*Group, error) { + var group Group + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &group) + return &group, err +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks workspace. +func (a *groupsPreviewImpl) List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListGroupsRequest) (*ListGroupsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListGroupsResponse) []Group { + return resp.Resources + } + getNextReq := func(resp *ListGroupsResponse) *ListGroupsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Group, string]( + iterator, + func(item Group) string { + return item.Id + }) + return dedupedIterator +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks workspace. 
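+// Design note (editorial): the List implementation above wraps the page
+// iterator in listing.NewDedupeIterator keyed on Group.Id, presumably because
+// SCIM offset pagination (startIndex/count) is not snapshot-consistent and an
+// item can reappear on a later page while the set changes; deduplication by
+// ID keeps the stream free of repeats.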
+func (a *groupsPreviewImpl) ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) + +} +func (a *groupsPreviewImpl) internalList(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { + var listGroupsResponse ListGroupsResponse + path := "/api/2.0preview/preview/scim/v2/Groups" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listGroupsResponse) + return &listGroupsResponse, err +} + +func (a *groupsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *groupsPreviewImpl) Update(ctx context.Context, request Group) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just PermissionMigrationPreview API methods +type permissionMigrationPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *permissionMigrationPreviewImpl) MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error) { + var migratePermissionsResponse MigratePermissionsResponse + path := "/api/2.0preview/permissionmigration" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &migratePermissionsResponse) + return &migratePermissionsResponse, err +} + +// unexported type that holds implementations of just PermissionsPreview API methods +type permissionsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *permissionsPreviewImpl) Get(ctx context.Context, request GetPermissionRequest) (*ObjectPermissions, error) { + var objectPermissions ObjectPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &objectPermissions) + return &objectPermissions, err +} + +func (a *permissionsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetPermissionLevelsRequest) (*GetPermissionLevelsResponse, error) { + var getPermissionLevelsResponse GetPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v/permissionLevels", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPermissionLevelsResponse) + return &getPermissionLevelsResponse, err +} + +func (a *permissionsPreviewImpl) Set(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { + var objectPermissions ObjectPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &objectPermissions) + return &objectPermissions, err +} + +func (a *permissionsPreviewImpl) Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { + var objectPermissions ObjectPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &objectPermissions) + return &objectPermissions, err +} + +// unexported type that holds implementations of just ServicePrincipalsPreview API methods +type servicePrincipalsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *servicePrincipalsPreviewImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { + var servicePrincipal ServicePrincipal + path := "/api/2.0preview/preview/scim/v2/ServicePrincipals" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &servicePrincipal) + return &servicePrincipal, err +} + +func (a *servicePrincipalsPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *servicePrincipalsPreviewImpl) Get(ctx context.Context, request GetServicePrincipalRequest) (*ServicePrincipal, error) { + var servicePrincipal ServicePrincipal + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servicePrincipal) + return &servicePrincipal, err +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks workspace. 
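+// Example (editorial sketch): a filtered listing using the SCIM filter
+// grammar documented on ListServicePrincipalsRequest; values are placeholders.
+//
+//	it := api.List(ctx, ListServicePrincipalsRequest{
+//		Filter: `displayName co "ci-"`,
+//		Count:  200,
+//	})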
+func (a *servicePrincipalsPreviewImpl) List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { + return resp.Resources + } + getNextReq := func(resp *ListServicePrincipalResponse) *ListServicePrincipalsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( + iterator, + func(item ServicePrincipal) string { + return item.Id + }) + return dedupedIterator +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks workspace. +func (a *servicePrincipalsPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) + +} +func (a *servicePrincipalsPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + var listServicePrincipalResponse ListServicePrincipalResponse + path := "/api/2.0preview/preview/scim/v2/ServicePrincipals" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalResponse) + return &listServicePrincipalResponse, err +} + +func (a *servicePrincipalsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *servicePrincipalsPreviewImpl) Update(ctx context.Context, request ServicePrincipal) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just UsersPreview API methods +type usersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *usersPreviewImpl) Create(ctx context.Context, request User) (*User, error) { + var user User + path := "/api/2.0preview/preview/scim/v2/Users" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &user) + 
return &user, err +} + +func (a *usersPreviewImpl) Delete(ctx context.Context, request DeleteUserRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *usersPreviewImpl) Get(ctx context.Context, request GetUserRequest) (*User, error) { + var user User + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &user) + return &user, err +} + +func (a *usersPreviewImpl) GetPermissionLevels(ctx context.Context) (*GetPasswordPermissionLevelsResponse, error) { + var getPasswordPermissionLevelsResponse GetPasswordPermissionLevelsResponse + path := "/api/2.0preview/permissions/authorization/passwords/permissionLevels" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getPasswordPermissionLevelsResponse) + return &getPasswordPermissionLevelsResponse, err +} + +func (a *usersPreviewImpl) GetPermissions(ctx context.Context) (*PasswordPermissions, error) { + var passwordPermissions PasswordPermissions + path := "/api/2.0preview/permissions/authorization/passwords" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &passwordPermissions) + return &passwordPermissions, err +} + +// List users. +// +// Gets details for all the users associated with a Databricks workspace. +func (a *usersPreviewImpl) List(ctx context.Context, request ListUsersRequest) listing.Iterator[User] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListUsersRequest) (*ListUsersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListUsersResponse) []User { + return resp.Resources + } + getNextReq := func(resp *ListUsersResponse) *ListUsersRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[User, string]( + iterator, + func(item User) string { + return item.Id + }) + return dedupedIterator +} + +// List users. +// +// Gets details for all the users associated with a Databricks workspace. 
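+// Example (editorial sketch): projecting attributes and sorting while listing
+// users. ListUsersRequest's fields mirror the other SCIM list requests in
+// model.go; User's Id and UserName fields are assumed from the SCIM schema.
+//
+//	users, err := api.ListAll(ctx, ListUsersRequest{
+//		Attributes: "id,userName",
+//		SortBy:     "userName",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, u := range users {
+//		fmt.Println(u.Id, u.UserName)
+//	}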
+func (a *usersPreviewImpl) ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[User, int64](ctx, iterator, request.Count) + +} +func (a *usersPreviewImpl) internalList(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { + var listUsersResponse ListUsersResponse + path := "/api/2.0preview/preview/scim/v2/Users" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUsersResponse) + return &listUsersResponse, err +} + +func (a *usersPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { + var patchResponse PatchResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) + return err +} + +func (a *usersPreviewImpl) SetPermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { + var passwordPermissions PasswordPermissions + path := "/api/2.0preview/permissions/authorization/passwords" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &passwordPermissions) + return &passwordPermissions, err +} + +func (a *usersPreviewImpl) Update(ctx context.Context, request User) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +func (a *usersPreviewImpl) UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { + var passwordPermissions PasswordPermissions + path := "/api/2.0preview/permissions/authorization/passwords" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &passwordPermissions) + return &passwordPermissions, err +} + +// unexported type that holds implementations of just WorkspaceAssignmentPreview API methods +type workspaceAssignmentPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *workspaceAssignmentPreviewImpl) Delete(ctx context.Context, request DeleteWorkspaceAssignmentRequest) error { + var deleteWorkspacePermissionAssignmentResponse DeleteWorkspacePermissionAssignmentResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteWorkspacePermissionAssignmentResponse) + return err +} + +func 
(a *workspaceAssignmentPreviewImpl) Get(ctx context.Context, request GetWorkspaceAssignmentRequest) (*WorkspacePermissions, error) { + var workspacePermissions WorkspacePermissions + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/permissions", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspacePermissions) + return &workspacePermissions, err +} + +// Get permission assignments. +// +// Get the permission assignments for the specified Databricks account and +// Databricks workspace. +func (a *workspaceAssignmentPreviewImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] { + + getNextPage := func(ctx context.Context, req ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *PermissionAssignments) []PermissionAssignment { + return resp.PermissionAssignments + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get permission assignments. +// +// Get the permission assignments for the specified Databricks account and +// Databricks workspace. +func (a *workspaceAssignmentPreviewImpl) ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PermissionAssignment](ctx, iterator) +} +func (a *workspaceAssignmentPreviewImpl) internalList(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { + var permissionAssignments PermissionAssignments + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionAssignments) + return &permissionAssignments, err +} + +func (a *workspaceAssignmentPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) { + var permissionAssignment PermissionAssignment + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &permissionAssignment) + return &permissionAssignment, err +} diff --git a/iam/v2preview/model.go b/iam/v2preview/model.go new file mode 100755 index 000000000..a49300b3c --- /dev/null +++ b/iam/v2preview/model.go @@ -0,0 +1,1513 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
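+//
+// Editorial note (not part of the generated header): most structs below pair
+// `omitempty` JSON tags with a ForceSendFields slice. marshal.Marshal emits a
+// zero-valued field only when its Go name is listed there, e.g.:
+//
+//	v := ComplexValue{Primary: false, ForceSendFields: []string{"Primary"}}
+//	b, _ := v.MarshalJSON() // serializes "primary":false instead of omitting it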
+
+package iampreview
+
+import (
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/databricks/marshal"
+)
+
+type AccessControlRequest struct {
+	// name of the group
+	GroupName string `json:"group_name,omitempty"`
+	// Permission level
+	PermissionLevel PermissionLevel `json:"permission_level,omitempty"`
+	// application ID of a service principal
+	ServicePrincipalName string `json:"service_principal_name,omitempty"`
+	// name of the user
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AccessControlRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AccessControlRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AccessControlResponse struct {
+	// All permissions.
+	AllPermissions []Permission `json:"all_permissions,omitempty"`
+	// Display name of the user or service principal.
+	DisplayName string `json:"display_name,omitempty"`
+	// name of the group
+	GroupName string `json:"group_name,omitempty"`
+	// Name of the service principal.
+	ServicePrincipalName string `json:"service_principal_name,omitempty"`
+	// name of the user
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *AccessControlResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AccessControlResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// represents an identity trying to access a resource - user or a service
+// principal. A group can be a principal of a permission set assignment, but
+// an actor is always a user or a service principal.
+type Actor struct {
+	ActorId int64 `json:"actor_id,omitempty" url:"actor_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Actor) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Actor) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Check access policy to a resource
+type CheckPolicyRequest struct {
+	Actor Actor `json:"-" url:"actor"`
+
+	AuthzIdentity RequestAuthzIdentity `json:"-" url:"authz_identity"`
+
+	ConsistencyToken ConsistencyToken `json:"-" url:"consistency_token"`
+
+	Permission string `json:"-" url:"permission"`
+	// Ex: (servicePrincipal/use,
+	// accounts//servicePrincipals/) Ex:
+	// (servicePrincipal.ruleSet/update,
+	// accounts//servicePrincipals//ruleSets/default)
+	Resource string `json:"-" url:"resource"`
+
+	ResourceInfo *ResourceInfo `json:"-" url:"resource_info,omitempty"`
+}
+
+type CheckPolicyResponse struct {
+	ConsistencyToken ConsistencyToken `json:"consistency_token"`
+
+	IsPermitted bool `json:"is_permitted,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CheckPolicyResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CheckPolicyResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ComplexValue struct {
+	Display string `json:"display,omitempty"`
+
+	Primary bool `json:"primary,omitempty"`
+
+	Ref string `json:"$ref,omitempty"`
+
+	Type string `json:"type,omitempty"`
+
+	Value string `json:"value,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ComplexValue) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ComplexValue) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ConsistencyToken struct {
+	Value string `json:"value"`
+}
+
+// Delete a group
+type DeleteAccountGroupRequest struct {
+	// Unique ID for
a group in the Databricks account. + Id string `json:"-" url:"-"` +} + +// Delete a service principal +type DeleteAccountServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks account. + Id string `json:"-" url:"-"` +} + +// Delete a user +type DeleteAccountUserRequest struct { + // Unique ID for a user in the Databricks account. + Id string `json:"-" url:"-"` +} + +// Delete a group +type DeleteGroupRequest struct { + // Unique ID for a group in the Databricks workspace. + Id string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a service principal +type DeleteServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks workspace. + Id string `json:"-" url:"-"` +} + +// Delete a user +type DeleteUserRequest struct { + // Unique ID for a user in the Databricks workspace. + Id string `json:"-" url:"-"` +} + +// Delete permissions assignment +type DeleteWorkspaceAssignmentRequest struct { + // The ID of the user, service principal, or group. + PrincipalId int64 `json:"-" url:"-"` + // The workspace ID for the account. + WorkspaceId int64 `json:"-" url:"-"` +} + +type DeleteWorkspacePermissionAssignmentResponse struct { +} + +// Get group details +type GetAccountGroupRequest struct { + // Unique ID for a group in the Databricks account. + Id string `json:"-" url:"-"` +} + +// Get service principal details +type GetAccountServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks account. + Id string `json:"-" url:"-"` +} + +// Get user details +type GetAccountUserRequest struct { + // Comma-separated list of attributes to return in response. + Attributes string `json:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count int `json:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter string `json:"-" url:"filter,omitempty"` + // Unique ID for a user in the Databricks account. + Id string `json:"-" url:"-"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy string `json:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder GetSortOrder `json:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex int `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAccountUserRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAccountUserRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get assignable roles for a resource +type GetAssignableRolesForResourceRequest struct { + // The resource name for which assignable roles will be listed. 
+	Resource string `json:"-" url:"resource"`
+}
+
+type GetAssignableRolesForResourceResponse struct {
+	Roles []Role `json:"roles,omitempty"`
+}
+
+// Get group details
+type GetGroupRequest struct {
+	// Unique ID for a group in the Databricks workspace.
+	Id string `json:"-" url:"-"`
+}
+
+type GetPasswordPermissionLevelsResponse struct {
+	// Specific permission levels
+	PermissionLevels []PasswordPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get object permission levels
+type GetPermissionLevelsRequest struct {
+	//
+	RequestObjectId string `json:"-" url:"-"`
+	//
+	RequestObjectType string `json:"-" url:"-"`
+}
+
+type GetPermissionLevelsResponse struct {
+	// Specific permission levels
+	PermissionLevels []PermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get object permissions
+type GetPermissionRequest struct {
+	// The id of the request object.
+	RequestObjectId string `json:"-" url:"-"`
+	// The type of the request object. Can be one of the following: alerts,
+	// authorization, clusters, cluster-policies, dashboards, dbsql-dashboards,
+	// directories, experiments, files, instance-pools, jobs, notebooks,
+	// pipelines, queries, registered-models, repos, serving-endpoints, or
+	// warehouses.
+	RequestObjectType string `json:"-" url:"-"`
+}
+
+// Get a rule set
+type GetRuleSetRequest struct {
+	// Etag used for versioning. The response is at least as fresh as the eTag
+	// provided. Etag is used for optimistic concurrency control as a way to
+	// help prevent simultaneous updates of a rule set from overwriting each
+	// other. It is strongly suggested that systems make use of the etag in the
+	// read -> modify -> write pattern to perform rule set updates and avoid
+	// race conditions: that is, get an etag from a GET rule set request and
+	// pass it with the PUT update request to identify the rule set version you
+	// are updating.
+	Etag string `json:"-" url:"etag"`
+	// The ruleset name associated with the request.
+	Name string `json:"-" url:"name"`
+}
+
+// Get service principal details
+type GetServicePrincipalRequest struct {
+	// Unique ID for a service principal in the Databricks workspace.
+	Id string `json:"-" url:"-"`
+}
+
+type GetSortOrder string
+
+const GetSortOrderAscending GetSortOrder = `ascending`
+
+const GetSortOrderDescending GetSortOrder = `descending`
+
+// String representation for [fmt.Print]
+func (f *GetSortOrder) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *GetSortOrder) Set(v string) error {
+	switch v {
+	case `ascending`, `descending`:
+		*f = GetSortOrder(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "ascending", "descending"`, v)
+	}
+}
+
+// Type always returns GetSortOrder to satisfy [pflag.Value] interface
+func (f *GetSortOrder) Type() string {
+	return "GetSortOrder"
+}
+
+// Get user details
+type GetUserRequest struct {
+	// Comma-separated list of attributes to return in response.
+	Attributes string `json:"-" url:"attributes,omitempty"`
+	// Desired number of results per page.
+	Count int `json:"-" url:"count,omitempty"`
+	// Comma-separated list of attributes to exclude in response.
+	ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"`
+	// Query by which the results have to be filtered. Supported operators are
+	// equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`).
+ // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter string `json:"-" url:"filter,omitempty"` + // Unique ID for a user in the Databricks workspace. + Id string `json:"-" url:"-"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy string `json:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder GetSortOrder `json:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex int `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetUserRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetUserRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List workspace permissions +type GetWorkspaceAssignmentRequest struct { + // The workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +type GrantRule struct { + // Principals this grant rule applies to. + Principals []string `json:"principals,omitempty"` + // Role that is assigned to the list of principals. + Role string `json:"role"` +} + +type Group struct { + // String that represents a human-readable group name + DisplayName string `json:"displayName,omitempty"` + // Entitlements assigned to the group. See [assigning entitlements] for a + // full list of supported values. + // + // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + Entitlements []ComplexValue `json:"entitlements,omitempty"` + + ExternalId string `json:"externalId,omitempty"` + + Groups []ComplexValue `json:"groups,omitempty"` + // Databricks group ID + Id string `json:"id,omitempty" url:"-"` + + Members []ComplexValue `json:"members,omitempty"` + // Container for the group identifier. Workspace local versus account. + Meta *ResourceMeta `json:"meta,omitempty"` + // Corresponds to AWS instance profile/arn role. + Roles []ComplexValue `json:"roles,omitempty"` + // The schema of the group. + Schemas []GroupSchema `json:"schemas,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Group) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Group) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GroupSchema string + +const GroupSchemaUrnIetfParamsScimSchemasCore20Group GroupSchema = `urn:ietf:params:scim:schemas:core:2.0:Group` + +// String representation for [fmt.Print] +func (f *GroupSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GroupSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:schemas:core:2.0:Group`: + *f = GroupSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:Group"`, v) + } +} + +// Type always returns GroupSchema to satisfy [pflag.Value] interface +func (f *GroupSchema) Type() string { + return "GroupSchema" +} + +// List group details +type ListAccountGroupsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes string `json:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. 
+ Count int64 `json:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter string `json:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy string `json:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex int64 `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAccountGroupsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAccountGroupsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List service principals +type ListAccountServicePrincipalsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes string `json:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count int64 `json:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter string `json:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy string `json:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex int64 `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAccountServicePrincipalsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAccountServicePrincipalsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List users +type ListAccountUsersRequest struct { + // Comma-separated list of attributes to return in response. + Attributes string `json:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count int64 `json:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. 
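+	// For example: `userName sw "john" and displayName co "data"`.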
+ //
+ // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2
+ Filter string `json:"-" url:"filter,omitempty"`
+ // Attribute to sort the results. Multi-part paths are supported. For
+ // example, `userName`, `name.givenName`, and `emails`.
+ SortBy string `json:"-" url:"sortBy,omitempty"`
+ // The order to sort the results.
+ SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"`
+ // Specifies the index of the first result. First item is number 1.
+ StartIndex int64 `json:"-" url:"startIndex,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListAccountUsersRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListAccountUsersRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List group details
+type ListGroupsRequest struct {
+ // Comma-separated list of attributes to return in response.
+ Attributes string `json:"-" url:"attributes,omitempty"`
+ // Desired number of results per page.
+ Count int64 `json:"-" url:"count,omitempty"`
+ // Comma-separated list of attributes to exclude in response.
+ ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"`
+ // Query by which the results have to be filtered. Supported operators are
+ // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`).
+ // Additionally, simple expressions can be formed using logical operators -
+ // `and` and `or`. The [SCIM RFC] has more details but we currently only
+ // support simple expressions.
+ //
+ // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2
+ Filter string `json:"-" url:"filter,omitempty"`
+ // Attribute to sort the results.
+ SortBy string `json:"-" url:"sortBy,omitempty"`
+ // The order to sort the results.
+ SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"`
+ // Specifies the index of the first result. First item is number 1.
+ StartIndex int64 `json:"-" url:"startIndex,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListGroupsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListGroupsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListGroupsResponse struct {
+ // Total results returned in the response.
+ ItemsPerPage int64 `json:"itemsPerPage,omitempty"`
+ // Group objects returned in the response.
+ Resources []Group `json:"Resources,omitempty"`
+ // The schema of the List response.
+ Schemas []ListResponseSchema `json:"schemas,omitempty"`
+ // Starting index of all the results that matched the request filters. First
+ // item is number 1.
+ StartIndex int64 `json:"startIndex,omitempty"`
+ // Total results that match the request filters.
+ TotalResults int64 `json:"totalResults,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListGroupsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListGroupsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListResponseSchema string
+
+const ListResponseSchemaUrnIetfParamsScimApiMessages20ListResponse ListResponseSchema = `urn:ietf:params:scim:api:messages:2.0:ListResponse`
+
+// String representation for [fmt.Print]
+func (f *ListResponseSchema) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ListResponseSchema) Set(v string) error {
+ switch v {
+ case `urn:ietf:params:scim:api:messages:2.0:ListResponse`:
+ *f = ListResponseSchema(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:api:messages:2.0:ListResponse"`, v)
+ }
+}
+
+// Type always returns ListResponseSchema to satisfy [pflag.Value] interface
+func (f *ListResponseSchema) Type() string {
+ return "ListResponseSchema"
+}
+
+type ListServicePrincipalResponse struct {
+ // Total results returned in the response.
+ ItemsPerPage int64 `json:"itemsPerPage,omitempty"`
+ // Service principal objects returned in the response.
+ Resources []ServicePrincipal `json:"Resources,omitempty"`
+ // The schema of the List response.
+ Schemas []ListResponseSchema `json:"schemas,omitempty"`
+ // Starting index of all the results that matched the request filters. First
+ // item is number 1.
+ StartIndex int64 `json:"startIndex,omitempty"`
+ // Total results that match the request filters.
+ TotalResults int64 `json:"totalResults,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListServicePrincipalResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListServicePrincipalResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List service principals
+type ListServicePrincipalsRequest struct {
+ // Comma-separated list of attributes to return in response.
+ Attributes string `json:"-" url:"attributes,omitempty"`
+ // Desired number of results per page.
+ Count int64 `json:"-" url:"count,omitempty"`
+ // Comma-separated list of attributes to exclude in response.
+ ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"`
+ // Query by which the results have to be filtered. Supported operators are
+ // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`).
+ // Additionally, simple expressions can be formed using logical operators -
+ // `and` and `or`. The [SCIM RFC] has more details but we currently only
+ // support simple expressions.
+ //
+ // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2
+ Filter string `json:"-" url:"filter,omitempty"`
+ // Attribute to sort the results.
+ SortBy string `json:"-" url:"sortBy,omitempty"`
+ // The order to sort the results.
+ SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"`
+ // Specifies the index of the first result. First item is number 1.
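+ //
+ // A sketch of page-by-page iteration, assuming a client whose
+ // ServicePrincipals.List method accepts this request type and returns
+ // ListServicePrincipalResponse (the client shape is illustrative):
+ //
+ //	req := ListServicePrincipalsRequest{Count: 100, StartIndex: 1}
+ //	for {
+ //		resp, err := client.ServicePrincipals.List(ctx, req)
+ //		if err != nil || len(resp.Resources) == 0 {
+ //			break
+ //		}
+ //		// startIndex is 1-based, so advance it by the page just read.
+ //		req.StartIndex += int64(len(resp.Resources))
+ //	}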
+ StartIndex int64 `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListSortOrder string + +const ListSortOrderAscending ListSortOrder = `ascending` + +const ListSortOrderDescending ListSortOrder = `descending` + +// String representation for [fmt.Print] +func (f *ListSortOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListSortOrder) Set(v string) error { + switch v { + case `ascending`, `descending`: + *f = ListSortOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ascending", "descending"`, v) + } +} + +// Type always returns ListSortOrder to satisfy [pflag.Value] interface +func (f *ListSortOrder) Type() string { + return "ListSortOrder" +} + +// List users +type ListUsersRequest struct { + // Comma-separated list of attributes to return in response. + Attributes string `json:"-" url:"attributes,omitempty"` + // Desired number of results per page. + Count int64 `json:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes string `json:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter string `json:"-" url:"filter,omitempty"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy string `json:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `json:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex int64 `json:"-" url:"startIndex,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListUsersRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListUsersRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListUsersResponse struct { + // Total results returned in the response. + ItemsPerPage int64 `json:"itemsPerPage,omitempty"` + // User objects returned in the response. + Resources []User `json:"Resources,omitempty"` + // The schema of the List response. + Schemas []ListResponseSchema `json:"schemas,omitempty"` + // Starting index of all the results that matched the request filters. First + // item is number 1. + StartIndex int64 `json:"startIndex,omitempty"` + // Total results that match the request filters. + TotalResults int64 `json:"totalResults,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListUsersResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListUsersResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get permission assignments +type ListWorkspaceAssignmentRequest struct { + // The workspace ID for the account. 
+ WorkspaceId int64 `json:"-" url:"-"` +} + +type MigratePermissionsRequest struct { + // The name of the workspace group that permissions will be migrated from. + FromWorkspaceGroupName string `json:"from_workspace_group_name"` + // The maximum number of permissions that will be migrated. + Size int `json:"size,omitempty"` + // The name of the account group that permissions will be migrated to. + ToAccountGroupName string `json:"to_account_group_name"` + // WorkspaceId of the associated workspace where the permission migration + // will occur. + WorkspaceId int64 `json:"workspace_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *MigratePermissionsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MigratePermissionsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MigratePermissionsResponse struct { + // Number of permissions migrated. + PermissionsMigrated int `json:"permissions_migrated,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MigratePermissionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MigratePermissionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Name struct { + // Family name of the Databricks user. + FamilyName string `json:"familyName,omitempty"` + // Given name of the Databricks user. + GivenName string `json:"givenName,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Name) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Name) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ObjectPermissions struct { + AccessControlList []AccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ObjectPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ObjectPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PartialUpdate struct { + // Unique ID for a user in the Databricks workspace. + Id string `json:"-" url:"-"` + + Operations []Patch `json:"Operations,omitempty"` + // The schema of the patch request. Must be + // ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + Schemas []PatchSchema `json:"schemas,omitempty"` +} + +type PasswordAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel PasswordPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PasswordAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PasswordAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PasswordAccessControlResponse struct { + // All permissions. + AllPermissions []PasswordPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. 
+ ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PasswordAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PasswordAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PasswordPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel PasswordPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PasswordPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PasswordPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type PasswordPermissionLevel string + +const PasswordPermissionLevelCanUse PasswordPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *PasswordPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PasswordPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = PasswordPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns PasswordPermissionLevel to satisfy [pflag.Value] interface +func (f *PasswordPermissionLevel) Type() string { + return "PasswordPermissionLevel" +} + +type PasswordPermissions struct { + AccessControlList []PasswordAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PasswordPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PasswordPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PasswordPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel PasswordPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PasswordPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PasswordPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PasswordPermissionsRequest struct { + AccessControlList []PasswordAccessControlRequest `json:"access_control_list,omitempty"` +} + +type Patch struct { + // Type of patch operation. + Op PatchOp `json:"op,omitempty"` + // Selection of patch operation + Path string `json:"path,omitempty"` + // Value to modify + Value any `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Patch) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Patch) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Type of patch operation. 
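+// For example, an illustrative PartialUpdate that deactivates a user (the id
+// value is a placeholder):
+//
+//	update := PartialUpdate{
+//		Id:      "123",
+//		Schemas: []PatchSchema{PatchSchemaUrnIetfParamsScimApiMessages20PatchOp},
+//		Operations: []Patch{{
+//			Op:    PatchOpReplace,
+//			Path:  "active",
+//			Value: false,
+//		}},
+//	}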
+type PatchOp string + +const PatchOpAdd PatchOp = `add` + +const PatchOpRemove PatchOp = `remove` + +const PatchOpReplace PatchOp = `replace` + +// String representation for [fmt.Print] +func (f *PatchOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PatchOp) Set(v string) error { + switch v { + case `add`, `remove`, `replace`: + *f = PatchOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "add", "remove", "replace"`, v) + } +} + +// Type always returns PatchOp to satisfy [pflag.Value] interface +func (f *PatchOp) Type() string { + return "PatchOp" +} + +type PatchResponse struct { +} + +type PatchSchema string + +const PatchSchemaUrnIetfParamsScimApiMessages20PatchOp PatchSchema = `urn:ietf:params:scim:api:messages:2.0:PatchOp` + +// String representation for [fmt.Print] +func (f *PatchSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PatchSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:api:messages:2.0:PatchOp`: + *f = PatchSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:api:messages:2.0:PatchOp"`, v) + } +} + +// Type always returns PatchSchema to satisfy [pflag.Value] interface +func (f *PatchSchema) Type() string { + return "PatchSchema" +} + +type Permission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel PermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Permission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Permission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The output format for existing workspace PermissionAssignment records, which +// contains some info for user consumption. +type PermissionAssignment struct { + // Error response associated with a workspace permission assignment, if any. + Error string `json:"error,omitempty"` + // The permissions level of the principal. + Permissions []WorkspacePermission `json:"permissions,omitempty"` + // Information about the principal assigned to the workspace. + Principal *PrincipalOutput `json:"principal,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PermissionAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionAssignments struct { + // Array of permissions assignments defined for a workspace. 
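+ //
+ // A sketch of scanning a fetched value for per-principal errors
+ // (`assignments` is assumed to be a PermissionAssignments):
+ //
+ //	for _, pa := range assignments.PermissionAssignments {
+ //		if pa.Error != "" && pa.Principal != nil {
+ //			fmt.Printf("principal %d: %s\n", pa.Principal.PrincipalId, pa.Error)
+ //		}
+ //	}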
+ PermissionAssignments []PermissionAssignment `json:"permission_assignments,omitempty"` +} + +// Permission level +type PermissionLevel string + +const PermissionLevelCanAttachTo PermissionLevel = `CAN_ATTACH_TO` + +const PermissionLevelCanBind PermissionLevel = `CAN_BIND` + +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +const PermissionLevelCanEditMetadata PermissionLevel = `CAN_EDIT_METADATA` + +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +const PermissionLevelCanManageProductionVersions PermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const PermissionLevelCanManageRun PermissionLevel = `CAN_MANAGE_RUN` + +const PermissionLevelCanManageStagingVersions PermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const PermissionLevelCanMonitor PermissionLevel = `CAN_MONITOR` + +const PermissionLevelCanQuery PermissionLevel = `CAN_QUERY` + +const PermissionLevelCanRead PermissionLevel = `CAN_READ` + +const PermissionLevelCanRestart PermissionLevel = `CAN_RESTART` + +const PermissionLevelCanRun PermissionLevel = `CAN_RUN` + +const PermissionLevelCanUse PermissionLevel = `CAN_USE` + +const PermissionLevelCanView PermissionLevel = `CAN_VIEW` + +const PermissionLevelCanViewMetadata PermissionLevel = `CAN_VIEW_METADATA` + +const PermissionLevelIsOwner PermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_ATTACH_TO`, `CAN_BIND`, `CAN_EDIT`, `CAN_EDIT_METADATA`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_RUN`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_MONITOR`, `CAN_QUERY`, `CAN_READ`, `CAN_RESTART`, `CAN_RUN`, `CAN_USE`, `CAN_VIEW`, `CAN_VIEW_METADATA`, `IS_OWNER`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_BIND", "CAN_EDIT", "CAN_EDIT_METADATA", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_RUN", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MONITOR", "CAN_QUERY", "CAN_READ", "CAN_RESTART", "CAN_RUN", "CAN_USE", "CAN_VIEW", "CAN_VIEW_METADATA", "IS_OWNER"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +type PermissionOutput struct { + // The results of a permissions query. + Description string `json:"description,omitempty"` + + PermissionLevel WorkspacePermission `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PermissionOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel PermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionsRequest struct { + AccessControlList []AccessControlRequest `json:"access_control_list,omitempty"` + // The id of the request object. + RequestObjectId string `json:"-" url:"-"` + // The type of the request object. 
Can be one of the following: alerts,
+ // authorization, clusters, cluster-policies, dashboards, dbsql-dashboards,
+ // directories, experiments, files, instance-pools, jobs, notebooks,
+ // pipelines, queries, registered-models, repos, serving-endpoints, or
+ // warehouses.
+ RequestObjectType string `json:"-" url:"-"`
+}
+
+// Information about the principal assigned to the workspace.
+type PrincipalOutput struct {
+ // The display name of the principal.
+ DisplayName string `json:"display_name,omitempty"`
+ // The name of the group. Present only if the principal is a group.
+ GroupName string `json:"group_name,omitempty"`
+ // The unique, opaque id of the principal.
+ PrincipalId int64 `json:"principal_id,omitempty"`
+ // The name of the service principal. Present only if the principal is a
+ // service principal.
+ ServicePrincipalName string `json:"service_principal_name,omitempty"`
+ // The username of the user. Present only if the principal is a user.
+ UserName string `json:"user_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PrincipalOutput) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PrincipalOutput) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Defines the identity to be used for authZ of the request on the server side.
+// See one pager for more information: http://go/acl/service-identity
+type RequestAuthzIdentity string
+
+const RequestAuthzIdentityRequestAuthzIdentityServiceIdentity RequestAuthzIdentity = `REQUEST_AUTHZ_IDENTITY_SERVICE_IDENTITY`
+
+const RequestAuthzIdentityRequestAuthzIdentityUserContext RequestAuthzIdentity = `REQUEST_AUTHZ_IDENTITY_USER_CONTEXT`
+
+// String representation for [fmt.Print]
+func (f *RequestAuthzIdentity) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RequestAuthzIdentity) Set(v string) error {
+ switch v {
+ case `REQUEST_AUTHZ_IDENTITY_SERVICE_IDENTITY`, `REQUEST_AUTHZ_IDENTITY_USER_CONTEXT`:
+ *f = RequestAuthzIdentity(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "REQUEST_AUTHZ_IDENTITY_SERVICE_IDENTITY", "REQUEST_AUTHZ_IDENTITY_USER_CONTEXT"`, v)
+ }
+}
+
+// Type always returns RequestAuthzIdentity to satisfy [pflag.Value] interface
+func (f *RequestAuthzIdentity) Type() string {
+ return "RequestAuthzIdentity"
+}
+
+type ResourceInfo struct {
+ // Id of the current resource.
+ Id string `json:"id" url:"id"`
+ // The legacy acl path of the current resource.
+ LegacyAclPath string `json:"legacy_acl_path,omitempty" url:"legacy_acl_path,omitempty"`
+ // Parent resource info for the current resource. The parent may have
+ // another parent.
+ ParentResourceInfo *ResourceInfo `json:"parent_resource_info,omitempty" url:"parent_resource_info,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ResourceInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ResourceInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ResourceMeta struct {
+ // Identifier for group type. Can be local workspace group
+ // (`WorkspaceGroup`) or account group (`Group`).
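+ //
+ // For example (illustrative), a workspace-local group is marked as:
+ //
+ //	meta := ResourceMeta{ResourceType: "WorkspaceGroup"}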
+ ResourceType string `json:"resourceType,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ResourceMeta) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ResourceMeta) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type Role struct {
+ // Role to assign to a principal or a list of principals on a resource.
+ Name string `json:"name"`
+}
+
+type RuleSetResponse struct {
+ // Identifies the version of the rule set returned.
+ Etag string `json:"etag,omitempty"`
+
+ GrantRules []GrantRule `json:"grant_rules,omitempty"`
+ // Name of the rule set.
+ Name string `json:"name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RuleSetResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RuleSetResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type RuleSetUpdateRequest struct {
+ // The expected etag of the rule set to update. The update will fail if the
+ // value does not match the value that is stored in the account access
+ // control service.
+ Etag string `json:"etag"`
+
+ GrantRules []GrantRule `json:"grant_rules,omitempty"`
+ // Name of the rule set.
+ Name string `json:"name"`
+}
+
+type ServicePrincipal struct {
+ // If this service principal is active
+ Active bool `json:"active,omitempty"`
+ // UUID relating to the service principal
+ ApplicationId string `json:"applicationId,omitempty"`
+ // String that represents a concatenation of given and family names.
+ DisplayName string `json:"displayName,omitempty"`
+ // Entitlements assigned to the service principal. See [assigning
+ // entitlements] for a full list of supported values.
+ //
+ // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements
+ Entitlements []ComplexValue `json:"entitlements,omitempty"`
+
+ ExternalId string `json:"externalId,omitempty"`
+
+ Groups []ComplexValue `json:"groups,omitempty"`
+ // Databricks service principal ID.
+ Id string `json:"id,omitempty" url:"-"`
+ // Corresponds to AWS instance profile/arn role.
+ Roles []ComplexValue `json:"roles,omitempty"`
+ // The schema of the service principal.
+ Schemas []ServicePrincipalSchema `json:"schemas,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ServicePrincipal) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ServicePrincipal) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ServicePrincipalSchema string
+
+const ServicePrincipalSchemaUrnIetfParamsScimSchemasCore20ServicePrincipal ServicePrincipalSchema = `urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal`
+
+// String representation for [fmt.Print]
+func (f *ServicePrincipalSchema) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ServicePrincipalSchema) Set(v string) error {
+ switch v {
+ case `urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal`:
+ *f = ServicePrincipalSchema(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal"`, v)
+ }
+}
+
+// Type always returns ServicePrincipalSchema to satisfy [pflag.Value] interface
+func (f *ServicePrincipalSchema) Type() string {
+ return "ServicePrincipalSchema"
+}
+
+type UpdateResponse struct {
+}
+
+type UpdateRuleSetRequest struct {
+ // Name of the rule set.
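+ //
+ // A sketch of an optimistic-concurrency update that reuses the etag of a
+ // previously fetched RuleSetResponse (`current`); the principal and role
+ // values are illustrative:
+ //
+ //	req := UpdateRuleSetRequest{
+ //		Name: current.Name,
+ //		RuleSet: RuleSetUpdateRequest{
+ //			Name: current.Name,
+ //			Etag: current.Etag,
+ //			GrantRules: []GrantRule{{
+ //				Principals: []string{"users/someone@example.com"},
+ //				Role:       "roles/servicePrincipal.user",
+ //			}},
+ //		},
+ //	}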
+ Name string `json:"name"` + + RuleSet RuleSetUpdateRequest `json:"rule_set"` +} + +type UpdateWorkspaceAssignments struct { + // Array of permissions assignments to update on the workspace. Valid values + // are "USER" and "ADMIN" (case-sensitive). If both "USER" and "ADMIN" are + // provided, "ADMIN" takes precedence. Other values will be ignored. Note + // that excluding this field, or providing unsupported values, will have the + // same effect as providing an empty list, which will result in the deletion + // of all permissions for the principal. + Permissions []WorkspacePermission `json:"permissions,omitempty"` + // The ID of the user, service principal, or group. + PrincipalId int64 `json:"-" url:"-"` + // The workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +type User struct { + // If this user is active + Active bool `json:"active,omitempty"` + // String that represents a concatenation of given and family names. For + // example `John Smith`. This field cannot be updated through the Workspace + // SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to + // update `displayName`. + // + // [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation + DisplayName string `json:"displayName,omitempty"` + // All the emails associated with the Databricks user. + Emails []ComplexValue `json:"emails,omitempty"` + // Entitlements assigned to the user. See [assigning entitlements] for a + // full list of supported values. + // + // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + Entitlements []ComplexValue `json:"entitlements,omitempty"` + // External ID is not currently supported. It is reserved for future use. + ExternalId string `json:"externalId,omitempty"` + + Groups []ComplexValue `json:"groups,omitempty"` + // Databricks user ID. This is automatically set by Databricks. Any value + // provided by the client will be ignored. + Id string `json:"id,omitempty" url:"-"` + + Name *Name `json:"name,omitempty"` + // Corresponds to AWS instance profile/arn role. + Roles []ComplexValue `json:"roles,omitempty"` + // The schema of the user. + Schemas []UserSchema `json:"schemas,omitempty"` + // Email address of the Databricks user. 
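+ //
+ // For example (illustrative values):
+ //
+ //	u := User{
+ //		UserName:    "someone@example.com",
+ //		DisplayName: "Some One",
+ //		Name:        &Name{GivenName: "Some", FamilyName: "One"},
+ //	}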
+ UserName string `json:"userName,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *User) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s User) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UserSchema string
+
+const UserSchemaUrnIetfParamsScimSchemasCore20User UserSchema = `urn:ietf:params:scim:schemas:core:2.0:User`
+
+const UserSchemaUrnIetfParamsScimSchemasExtensionWorkspace20User UserSchema = `urn:ietf:params:scim:schemas:extension:workspace:2.0:User`
+
+// String representation for [fmt.Print]
+func (f *UserSchema) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *UserSchema) Set(v string) error {
+ switch v {
+ case `urn:ietf:params:scim:schemas:core:2.0:User`, `urn:ietf:params:scim:schemas:extension:workspace:2.0:User`:
+ *f = UserSchema(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:User", "urn:ietf:params:scim:schemas:extension:workspace:2.0:User"`, v)
+ }
+}
+
+// Type always returns UserSchema to satisfy [pflag.Value] interface
+func (f *UserSchema) Type() string {
+ return "UserSchema"
+}
+
+type WorkspacePermission string
+
+const WorkspacePermissionAdmin WorkspacePermission = `ADMIN`
+
+const WorkspacePermissionUnknown WorkspacePermission = `UNKNOWN`
+
+const WorkspacePermissionUser WorkspacePermission = `USER`
+
+// String representation for [fmt.Print]
+func (f *WorkspacePermission) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *WorkspacePermission) Set(v string) error {
+ switch v {
+ case `ADMIN`, `UNKNOWN`, `USER`:
+ *f = WorkspacePermission(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ADMIN", "UNKNOWN", "USER"`, v)
+ }
+}
+
+// Type always returns WorkspacePermission to satisfy [pflag.Value] interface
+func (f *WorkspacePermission) Type() string {
+ return "WorkspacePermission"
+}
+
+type WorkspacePermissions struct {
+ // Array of permissions defined for a workspace.
+ Permissions []PermissionOutput `json:"permissions,omitempty"`
+}
diff --git a/jobs/v2/model.go b/jobs/v2/model.go
index 4a12a91a8..4b155968f 100755
--- a/jobs/v2/model.go
+++ b/jobs/v2/model.go
@@ -8,6 +8,231 @@ import (
 "github.com/databricks/databricks-sdk-go/databricks/marshal"
 )
+type Adlsgen2Info struct {
+ // abfss destination, e.g.
+ // `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`.
+ Destination string `json:"destination"`
+}
+
+type AutoScale struct {
+ // The maximum number of workers to which the cluster can scale up when
+ // overloaded. Note that `max_workers` must be strictly greater than
+ // `min_workers`.
+ MaxWorkers int `json:"max_workers,omitempty"`
+ // The minimum number of workers to which the cluster can scale down when
+ // underutilized. It is also the initial number of workers the cluster will
+ // have after creation.
+ MinWorkers int `json:"min_workers,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AutoScale) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s AutoScale) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type AwsAttributes struct {
+ // Availability type used for all subsequent nodes past the
+ // `first_on_demand` ones.
+ //
+ // Note: If `first_on_demand` is zero, this availability type will be used
+ // for the entire cluster.
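+ //
+ // An illustrative spot-with-fallback configuration (values are
+ // placeholders, not recommendations):
+ //
+ //	attrs := AwsAttributes{
+ //		Availability:        AwsAvailabilitySpotWithFallback,
+ //		FirstOnDemand:       1,
+ //		SpotBidPricePercent: 100,
+ //		ZoneId:              "auto",
+ //	}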
+ Availability AwsAvailability `json:"availability,omitempty"`
+ // The number of volumes launched for each instance. Users can choose up to
+ // 10 volumes. This feature is only enabled for supported node types. Legacy
+ // node types cannot specify custom EBS volumes. For node types with no
+ // instance store, at least one EBS volume needs to be specified; otherwise,
+ // cluster creation will fail.
+ //
+ // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc. Instance
+ // store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc.
+ //
+ // If EBS volumes are attached, Databricks will configure Spark to use only
+ // the EBS volumes for scratch storage because heterogeneously sized scratch
+ // devices can lead to inefficient disk utilization. If no EBS volumes are
+ // attached, Databricks will configure Spark to use instance store volumes.
+ //
+ // Please note that if EBS volumes are specified, then the Spark
+ // configuration `spark.local.dir` will be overridden.
+ EbsVolumeCount int `json:"ebs_volume_count,omitempty"`
+ // If using gp3 volumes, what IOPS to use for the disk. If this is not set,
+ // the maximum performance of a gp2 volume with the same volume size will be
+ // used.
+ EbsVolumeIops int `json:"ebs_volume_iops,omitempty"`
+ // The size of each EBS volume (in GiB) launched for each instance. For
+ // general purpose SSD, this value must be within the range 100 - 4096. For
+ // throughput optimized HDD, this value must be within the range 500 - 4096.
+ EbsVolumeSize int `json:"ebs_volume_size,omitempty"`
+ // If using gp3 volumes, what throughput to use for the disk. If this is not
+ // set, the maximum performance of a gp2 volume with the same volume size
+ // will be used.
+ EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"`
+ // The type of EBS volumes that will be launched with this cluster.
+ EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"`
+ // The first `first_on_demand` nodes of the cluster will be placed on
+ // on-demand instances. If this value is greater than 0, the cluster driver
+ // node in particular will be placed on an on-demand instance. If this value
+ // is greater than or equal to the current cluster size, all nodes will be
+ // placed on on-demand instances. If this value is less than the current
+ // cluster size, `first_on_demand` nodes will be placed on on-demand
+ // instances and the remainder will be placed on `availability` instances.
+ // Note that this value does not affect cluster size and cannot currently be
+ // mutated over the lifetime of a cluster.
+ FirstOnDemand int `json:"first_on_demand,omitempty"`
+ // Nodes for this cluster will only be placed on AWS instances with this
+ // instance profile. If omitted, nodes will be placed on instances without
+ // an IAM instance profile. The instance profile must have previously been
+ // added to the Databricks environment by an account administrator.
+ //
+ // This feature may only be available to certain customer plans.
+ //
+ // If this field is omitted, we will pull in the default from the conf if
+ // it exists.
+ InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
+ // The bid price for AWS spot instances, as a percentage of the
+ // corresponding instance type's on-demand price. For example, if this field
+ // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then
+ // the bid price is half of the price of on-demand `r3.xlarge` instances.
+ // Similarly, if this field is set to 200, the bid price is twice the price
+ // of on-demand `r3.xlarge` instances. If not specified, the default value
+ // is 100. When spot instances are requested for this cluster, only spot
+ // instances whose bid price percentage matches this field will be
+ // considered. Note that, for safety, we enforce this field to be no more
+ // than 10000.
+ //
+ // The default value and documentation here should be kept consistent with
+ // CommonConf.defaultSpotBidPricePercent and
+ // CommonConf.maxSpotBidPricePercent.
+ SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"`
+ // Identifier for the availability zone/datacenter in which the cluster
+ // resides. This string will be of a form like "us-west-2a". The provided
+ // availability zone must be in the same region as the Databricks
+ // deployment. For example, "us-west-2a" is not a valid zone id if the
+ // Databricks deployment resides in the "us-east-1" region. This is an
+ // optional field at cluster creation, and if not specified, a default zone
+ // will be used. If the zone specified is "auto", Databricks will try to
+ // place the cluster in a zone with high availability, and will retry
+ // placement in a different AZ if there is not enough capacity. The list of
+ // available zones as well as the default value can be found by using the
+ // `List Zones` method.
+ ZoneId string `json:"zone_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AwsAttributes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s AwsAttributes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Availability type used for all subsequent nodes past the `first_on_demand`
+// ones.
+//
+// Note: If `first_on_demand` is zero, this availability type will be used for
+// the entire cluster.
+type AwsAvailability string
+
+const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND`
+
+const AwsAvailabilitySpot AwsAvailability = `SPOT`
+
+const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK`
+
+// String representation for [fmt.Print]
+func (f *AwsAvailability) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AwsAvailability) Set(v string) error {
+ switch v {
+ case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`:
+ *f = AwsAvailability(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v)
+ }
+}
+
+// Type always returns AwsAvailability to satisfy [pflag.Value] interface
+func (f *AwsAvailability) Type() string {
+ return "AwsAvailability"
+}
+
+type AzureAttributes struct {
+ // Availability type used for all subsequent nodes past the
+ // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only
+ // happens on pool clusters), this availability type will be used for the
+ // entire cluster.
+ Availability AzureAvailability `json:"availability,omitempty"`
+ // The first `first_on_demand` nodes of the cluster will be placed on
+ // on-demand instances. This value should be greater than 0, to make sure
+ // the cluster driver node is placed on an on-demand instance. If this value
+ // is greater than or equal to the current cluster size, all nodes will be
+ // placed on on-demand instances. If this value is less than the current
+ // cluster size, `first_on_demand` nodes will be placed on on-demand
+ // instances and the remainder will be placed on `availability` instances.
+ // Note that this value does not affect cluster size and cannot currently be
+ // mutated over the lifetime of a cluster.
+ FirstOnDemand int `json:"first_on_demand,omitempty"`
+ // Defines values necessary to configure and run the Azure Log Analytics
+ // agent
+ LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"`
+ // The max bid price to be used for Azure spot instances. The max price for
+ // the bid cannot be higher than the on-demand price of the instance. If not
+ // specified, the default value is -1, which specifies that the instance
+ // cannot be evicted on the basis of price, and only on the basis of
+ // availability. Further, the value should be > 0 or -1.
+ SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AzureAttributes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s AzureAttributes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Availability type used for all subsequent nodes past the `first_on_demand`
+// ones. Note: If `first_on_demand` is zero (which only happens on pool
+// clusters), this availability type will be used for the entire cluster.
+type AzureAvailability string
+
+const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE`
+
+const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE`
+
+const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE`
+
+// String representation for [fmt.Print]
+func (f *AzureAvailability) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AzureAvailability) Set(v string) error {
+ switch v {
+ case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`:
+ *f = AzureAvailability(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v)
+ }
+}
+
+// Type always returns AzureAvailability to satisfy [pflag.Value] interface
+func (f *AzureAvailability) Type() string {
+ return "AzureAvailability"
+}
+
 type BaseJob struct {
 // The time at which this job was created in epoch milliseconds
 // (milliseconds since 1/1/1970 UTC).
@@ -369,6 +594,23 @@ type CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput struct {
 OutputSchemaInfo *OutputSchemaInfo `json:"output_schema_info,omitempty"`
 }
+type ClientsTypes struct {
+ // With jobs set, the cluster can be used for jobs
+ Jobs bool `json:"jobs,omitempty"`
+ // With notebooks set, this cluster can be used for notebooks
+ Notebooks bool `json:"notebooks,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ClientsTypes) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ClientsTypes) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
 type ClusterInstance struct {
 // The canonical identifier for the cluster used by a run. This field is
 // always available for runs on existing clusters. For runs on new clusters,
@@ -400,6 +642,21 @@ func (s ClusterInstance) MarshalJSON() ([]byte, error) {
 return marshal.Marshal(s)
 }
+type ClusterLogConf struct {
+ // destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
+ // "dbfs:/home/cluster_log" } }`
+ Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
+ // destination and either the region or endpoint need to be provided. e.g.
+ // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` Cluster iam role is used to access s3, please make sure + // the cluster iam role in `instance_profile_arn` has permission to write + // data to the s3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/catalog/schema/volume/cluster_log" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` +} + type ClusterSpec struct { // If existing_cluster_id, the ID of an existing cluster that is used for // all runs. When running jobs or tasks on an existing cluster, you may need @@ -679,6 +936,102 @@ type CronSchedule struct { TimezoneId string `json:"timezone_id"` } +// Data security mode decides what data governance model to use when accessing +// data from a cluster. +// +// The following modes can only be used with `kind`. * +// `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access +// mode depending on your compute configuration. * +// `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * +// `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. +// +// The following modes can be used regardless of `kind`. * `NONE`: No security +// isolation for multiple users sharing the cluster. Data governance features +// are not available in this mode. * `SINGLE_USER`: A secure cluster that can +// only be exclusively used by a single user specified in `single_user_name`. +// Most programming languages, cluster features and data governance features are +// available in this mode. * `USER_ISOLATION`: A secure cluster that can be +// shared by multiple users. Cluster users are fully isolated so that they +// cannot see each other's data and credentials. Most data governance features +// are supported in this mode. But programming languages and cluster features +// might be limited. +// +// The following modes are deprecated starting with Databricks Runtime 15.0 and +// will be removed for future Databricks Runtime versions: +// +// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL +// clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from +// legacy Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This +// mode is for users migrating from legacy Passthrough on standard clusters. * +// `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have +// UC nor passthrough enabled. +type DataSecurityMode string + +// will choose the most appropriate access mode depending on your +// compute configuration. +const DataSecurityModeDataSecurityModeAuto DataSecurityMode = `DATA_SECURITY_MODE_AUTO` + +// Alias for `SINGLE_USER`. +const DataSecurityModeDataSecurityModeDedicated DataSecurityMode = `DATA_SECURITY_MODE_DEDICATED` + +// Alias for `USER_ISOLATION`. +const DataSecurityModeDataSecurityModeStandard DataSecurityMode = `DATA_SECURITY_MODE_STANDARD` + +// This mode is for users migrating from legacy Passthrough on high concurrency +// clusters. +const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH` + +// This mode is for users migrating from legacy Passthrough on standard +// clusters. +const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER` + +// This mode provides a way that doesn’t have UC nor passthrough enabled. 
+const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD` + +// This mode is for users migrating from legacy Table ACL clusters. +const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL` + +// No security isolation for multiple users sharing the cluster. Data governance +// features are not available in this mode. +const DataSecurityModeNone DataSecurityMode = `NONE` + +// A secure cluster that can only be exclusively used by a single user specified +// in `single_user_name`. Most programming languages, cluster features and data +// governance features are available in this mode. +const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER` + +// A secure cluster that can be shared by multiple users. Cluster users are +// fully isolated so that they cannot see each other's data and credentials. +// Most data governance features are supported in this mode. But programming +// languages and cluster features might be limited. +const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION` + +// String representation for [fmt.Print] +func (f *DataSecurityMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataSecurityMode) Set(v string) error { + switch v { + case `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, `DATA_SECURITY_MODE_STANDARD`, `LEGACY_PASSTHROUGH`, `LEGACY_SINGLE_USER`, `LEGACY_SINGLE_USER_STANDARD`, `LEGACY_TABLE_ACL`, `NONE`, `SINGLE_USER`, `USER_ISOLATION`: + *f = DataSecurityMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_SECURITY_MODE_AUTO", "DATA_SECURITY_MODE_DEDICATED", "DATA_SECURITY_MODE_STANDARD", "LEGACY_PASSTHROUGH", "LEGACY_SINGLE_USER", "LEGACY_SINGLE_USER_STANDARD", "LEGACY_TABLE_ACL", "NONE", "SINGLE_USER", "USER_ISOLATION"`, v) + } +} + +// Type always returns DataSecurityMode to satisfy [pflag.Value] interface +func (f *DataSecurityMode) Type() string { + return "DataSecurityMode" +} + +type DbfsStorageInfo struct { + // dbfs destination, e.g. `dbfs:/my/path` + Destination string `json:"destination"` +} + type DbtOutput struct { // An optional map of headers to send when retrieving the artifact from the // `artifacts_link`. @@ -762,6 +1115,67 @@ type DeleteRun struct { type DeleteRunResponse struct { } +type DockerBasicAuth struct { + // Password of the user + Password string `json:"password,omitempty"` + // Name of the user + Username string `json:"username,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerBasicAuth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DockerImage struct { + BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` + // URL of the docker image. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerImage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerImage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of EBS volumes that will be launched with this cluster. 
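+// Like the other enum types in this file, it implements [pflag.Value], so a
+// raw string can be validated with Set (sketch):
+//
+//	var t EbsVolumeType
+//	if err := t.Set("GENERAL_PURPOSE_SSD"); err != nil {
+//		// unknown values yield a `value "..." is not one of ...` error
+//	}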
+type EbsVolumeType string + +const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD` + +const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *EbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = EbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns EbsVolumeType to satisfy [pflag.Value] interface +func (f *EbsVolumeType) Type() string { + return "EbsVolumeType" +} + // Represents a change to the job cluster's settings that would be required for // the job clusters to become compliant with their policies. type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct { @@ -998,6 +1412,87 @@ func (f *Format) Type() string { return "Format" } +type GcpAttributes struct { + // This field determines whether the instance pool will contain preemptible + // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs + // if the former is unavailable. + Availability GcpAvailability `json:"availability,omitempty"` + // boot disk size in GB + BootDiskSize int `json:"boot_disk_size,omitempty"` + // If provided, the cluster will impersonate the google service account when + // accessing gcloud services (like GCS). The google service account must + // have previously been added to the Databricks environment by an account + // administrator. + GoogleServiceAccount string `json:"google_service_account,omitempty"` + // If provided, each node (workers and driver) in the cluster will have this + // number of local SSDs attached. Each local SSD is 375GB in size. Refer to + // [GCP documentation] for the supported number of local SSDs for each + // instance type. + // + // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds + LocalSsdCount int `json:"local_ssd_count,omitempty"` + // This field determines whether the spark executors will be scheduled to + // run on preemptible VMs (when set to true) versus standard compute engine + // VMs (when set to false; default). Note: Soon to be deprecated, use the + // availability field instead. + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + // Identifier for the availability zone in which the cluster resides. This + // can be one of the following: - "HA" => High availability, spread nodes + // across availability zones for a Databricks deployment region [default] - + // "AUTO" => Databricks picks an availability zone to schedule the cluster + // on. - A GCP availability zone => Pick One of the available zones for + // (machine type + region) from + // https://cloud.google.com/compute/docs/regions-zones. + ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// This field determines whether the instance pool will contain preemptible VMs, +// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the +// former is unavailable. 
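+// An illustrative GcpAttributes using this setting (sizes are placeholders):
+//
+//	attrs := GcpAttributes{
+//		Availability:  GcpAvailabilityPreemptibleWithFallbackGcp,
+//		BootDiskSize:  100,
+//		LocalSsdCount: 1,
+//	}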
+type GcpAvailability string
+
+const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP`
+
+const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP`
+
+const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP`
+
+// String representation for [fmt.Print]
+func (f *GcpAvailability) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *GcpAvailability) Set(v string) error {
+ switch v {
+ case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`:
+ *f = GcpAvailability(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v)
+ }
+}
+
+// Type always returns GcpAvailability to satisfy [pflag.Value] interface
+func (f *GcpAvailability) Type() string {
+ return "GcpAvailability"
+}
+
+type GcsStorageInfo struct {
+ // GCS destination/URI, e.g. `gs://my-bucket/some-prefix`
+ Destination string `json:"destination"`
+}
+
 // Get job permission levels
 type GetJobPermissionLevelsRequest struct {
 // The job for which to get or manage permissions.
@@ -1196,6 +1691,34 @@ func (s GitSource) MarshalJSON() ([]byte, error) {
 return marshal.Marshal(s)
 }
+type InitScriptInfo struct {
+ // destination needs to be provided. e.g. `{ "abfss" : { "destination" :
+ // "abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>"
+ // } }`
+ Abfss *Adlsgen2Info `json:"abfss,omitempty"`
+ // destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
+ // "dbfs:/home/cluster_log" } }`
+ Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
+ // destination needs to be provided. e.g. `{ "file" : { "destination" :
+ // "file:/my/local/file.sh" } }`
+ File *LocalFileInfo `json:"file,omitempty"`
+ // destination needs to be provided. e.g. `{ "gcs": { "destination":
+ // "gs://my-bucket/file.sh" } }`
+ Gcs *GcsStorageInfo `json:"gcs,omitempty"`
+ // destination and either the region or endpoint need to be provided. e.g.
+ // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
+ // "us-west-2" } }` The cluster iam role is used to access s3; please make
+ // sure the cluster iam role in `instance_profile_arn` has permission to
+ // write data to the s3 destination.
+ S3 *S3StorageInfo `json:"s3,omitempty"`
+ // destination needs to be provided. e.g. `{ "volumes" : { "destination" :
+ // "/Volumes/my-init.sh" } }`
+ Volumes *VolumesStorageInfo `json:"volumes,omitempty"`
+ // destination needs to be provided. e.g. `{ "workspace" : { "destination" :
+ // "/Users/user1@databricks.com/my-init.sh" } }`
+ Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"`
+}
+
 // Job was retrieved successfully.
 type Job struct {
 // The time at which this job was created in epoch milliseconds
@@ -1293,7 +1816,7 @@ type JobCluster struct {
 // determine which cluster to launch for the task execution.
 JobClusterKey string `json:"job_cluster_key"`
 // If new_cluster, a description of a cluster that is created for each task.
- NewCluster ClusterSpec `json:"new_cluster"`
+ NewCluster JobsClusterSpec `json:"new_cluster"`
 }
 type JobCompliance struct {
@@ -1793,6 +2316,195 @@ func (f *JobSourceDirtyState) Type() string {
 return "JobSourceDirtyState"
 }
+type JobsClusterSpec struct {
+ // When set to true, fixed and default values from the policy will be used
+ // for fields that are omitted. When set to false, only fixed values from
+ // the policy will be applied.
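+ //
+ // A minimal illustrative spec combining autoscaling with fields defined
+ // below (node type and mode are placeholders):
+ //
+ //	spec := JobsClusterSpec{
+ //		NodeTypeId:       "i3.xlarge",
+ //		Autoscale:        &AutoScale{MinWorkers: 2, MaxWorkers: 8},
+ //		DataSecurityMode: DataSecurityModeSingleUser,
+ //	}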
+ ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `json:"autoscale,omitempty"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName string `json:"cluster_name,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // The following modes can only be used with `kind`. * + // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate + // access mode depending on your compute configuration. * + // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * + // `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. + // + // The following modes can be used regardless of `kind`. * `NONE`: No + // security isolation for multiple users sharing the cluster. Data + // governance features are not available in this mode. * `SINGLE_USER`: A + // secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. 
+ // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` + + DockerImage *DockerImage `json:"docker_image,omitempty"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // This field can only be used with `kind`. + // + // When set to true, Databricks will automatically set single node related + // `custom_tags`, `spark_conf`, and `num_workers` + IsSingleNode bool `json:"is_single_node,omitempty"` + // The kind of compute described by this compute specification. + // + // Depending on `kind`, different validations and default values will be + // applied. + // + // The first usage of this value is for the simple cluster form where it + // sets `kind = CLASSIC_PREVIEW`. + Kind Kind `json:"kind,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. 
+ // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobsClusterSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobsClusterSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Specifies the health metric that is being evaluated for a particular health // rule. 
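To make the shape of `JobsClusterSpec` concrete, here is a hedged sketch of populating one for a small autoscaling cluster with a Unity Catalog volume init script. Every value is illustrative, and the package alias `jobspreview` is assumed as before; the `DataSecurityMode` string cast is used deliberately to avoid guessing a generated constant name:

```go
// Illustrative only: the Spark version, node type, user, and paths are placeholders.
spec := jobspreview.JobsClusterSpec{
	SparkVersion:     "15.4.x-scala2.12", // list real versions via :method:clusters/sparkVersions
	NodeTypeId:       "i3.xlarge",        // list real node types via :method:clusters/listNodeTypes
	Autoscale:        &jobspreview.AutoScale{MinWorkers: 1, MaxWorkers: 4},
	DataSecurityMode: jobspreview.DataSecurityMode("SINGLE_USER"),
	SingleUserName:   "user1@databricks.com",
	InitScripts: []jobspreview.InitScriptInfo{
		// Exactly one destination field is set per init script entry.
		{Volumes: &jobspreview.VolumesStorageInfo{Destination: "/Volumes/main/default/scripts/my-init.sh"}},
	},
	SparkEnvVars: map[string]string{"SPARK_WORKER_MEMORY": "28000m"},
}
```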
// @@ -1901,6 +2613,38 @@ type JobsHealthRules struct { Rules []JobsHealthRule `json:"rules,omitempty"` } +// The kind of compute described by this compute specification. +// +// Depending on `kind`, different validations and default values will be +// applied. +// +// The first usage of this value is for the simple cluster form where it sets +// `kind = CLASSIC_PREVIEW`. +type Kind string + +const KindClassicPreview Kind = `CLASSIC_PREVIEW` + +// String representation for [fmt.Print] +func (f *Kind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Kind) Set(v string) error { + switch v { + case `CLASSIC_PREVIEW`: + *f = Kind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC_PREVIEW"`, v) + } +} + +// Type always returns Kind to satisfy [pflag.Value] interface +func (f *Kind) Type() string { + return "Kind" +} + type Library struct { + // Specification of a CRAN library to be installed as part of the library + Cran *RCranLibrary `json:"cran,omitempty"` @@ -2125,6 +2869,28 @@ func (s ListRunsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type LocalFileInfo struct { + // local file destination, e.g. `file:/my/local/file.sh` + Destination string `json:"destination"` +} + +type LogAnalyticsInfo struct { + // + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + // + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type MavenLibrary struct { // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". Coordinates string `json:"coordinates"` @@ -3859,6 +4625,86 @@ func (f *RunType) Type() string { return "RunType" } +// Determines the cluster's runtime engine, either standard or Photon. +// +// This field is not compatible with legacy `spark_version` values that contain +// `-photon-`. Remove `-photon-` from the `spark_version` and set +// `runtime_engine` to `PHOTON`. +// +// If left unspecified, the runtime engine defaults to standard unless the +// spark_version contains -photon-, in which case Photon will be used. +type RuntimeEngine string + +const RuntimeEngineNull RuntimeEngine = `NULL` + +const RuntimeEnginePhoton RuntimeEngine = `PHOTON` + +const RuntimeEngineStandard RuntimeEngine = `STANDARD` + +// String representation for [fmt.Print] +func (f *RuntimeEngine) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RuntimeEngine) Set(v string) error { + switch v { + case `NULL`, `PHOTON`, `STANDARD`: + *f = RuntimeEngine(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NULL", "PHOTON", "STANDARD"`, v) + } +} + +// Type always returns RuntimeEngine to satisfy [pflag.Value] interface +func (f *RuntimeEngine) Type() string { + return "RuntimeEngine" +} + +type S3StorageInfo struct { + // (Optional) Set canned access control list for the logs, e.g. + // `bucket-owner-full-control`. If `canned_acl` is set, please make sure the + // cluster iam role has `s3:PutObjectAcl` permission on the destination + // bucket and prefix. The full list of possible canned ACLs can be found at + // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
+ // Please also note that by default only the object owner gets full + // control. If you are using a cross-account role for writing data, you may + // want to set `bucket-owner-full-control` to make the bucket owner able to + // read the logs. + CannedAcl string `json:"canned_acl,omitempty"` + // S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs will be + // delivered using the cluster iam role, so please make sure you set the + // cluster iam role and that the role has write access to the destination. + // Please also note that you cannot use AWS keys to deliver logs. + Destination string `json:"destination"` + // (Optional) Flag to enable server side encryption, `false` by default. + EnableEncryption bool `json:"enable_encryption,omitempty"` + // (Optional) The encryption type; it can be `sse-s3` or `sse-kms`. It + // will be used only when encryption is enabled and the default type is + // `sse-s3`. + EncryptionType string `json:"encryption_type,omitempty"` + // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or + // endpoint needs to be set. If both are set, endpoint will be used. + Endpoint string `json:"endpoint,omitempty"` + // (Optional) KMS key which will be used if encryption is enabled and + // encryption type is set to `sse-kms`. + KmsKey string `json:"kms_key,omitempty"` + // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. + // If both are set, endpoint will be used. + Region string `json:"region,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *S3StorageInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s S3StorageInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file // will be retrieved from the local Databricks workspace. When set to `GIT`, // the SQL file will be retrieved from a Git repository defined in `git_source`. @@ -4399,7 +5245,7 @@ type SubmitTask struct { Libraries []Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *ClusterSpec `json:"new_cluster,omitempty"` + NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"` // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications @@ -4556,7 +5402,7 @@ type Task struct { MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *ClusterSpec `json:"new_cluster,omitempty"` + NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"` // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications @@ -5191,6 +6037,12 @@ func (f *ViewsToExport) Type() string { return "ViewsToExport" } +type VolumesStorageInfo struct { + // Unity Catalog volumes file destination, e.g. + // `/Volumes/catalog/schema/volume/dir/file` + Destination string `json:"destination"` +} + type Webhook struct { Id string `json:"id"` } @@ -5221,3 +6073,14 @@ type WebhookNotifications struct { // the `on_success` property. OnSuccess []Webhook `json:"on_success,omitempty"` } + +type WorkloadType struct { + // defines what type of clients can use the cluster. E.g. 
Notebooks, Jobs + Clients ClientsTypes `json:"clients"` +} + +type WorkspaceStorageInfo struct { + // workspace files destination, e.g. + // `/Users/user1@databricks.com/my-init.sh` + Destination string `json:"destination"` +} diff --git a/jobs/v2preview/api.go b/jobs/v2preview/api.go new file mode 100755 index 000000000..1113b9e00 --- /dev/null +++ b/jobs/v2preview/api.go @@ -0,0 +1,485 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Jobs Preview, Policy Compliance For Jobs Preview, etc. +package jobspreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type JobsPreviewInterface interface { + + // Cancel all runs of a job. + // + // Cancels all active runs of a job. The runs are canceled asynchronously, so it + // doesn't prevent new runs from being started. + CancelAllRuns(ctx context.Context, request CancelAllRuns) error + + // Cancel a run. + // + // Cancels a job run or a task run. The run is canceled asynchronously, so it + // may still be running when this request completes. + CancelRun(ctx context.Context, request CancelRun) error + + // Cancel a run. + // + // Cancels a job run or a task run. The run is canceled asynchronously, so it + // may still be running when this request completes. + CancelRunByRunId(ctx context.Context, runId int64) error + + // Create a new job. + // + // Create a new job. + Create(ctx context.Context, request CreateJob) (*CreateResponse, error) + + // Delete a job. + // + // Deletes a job. + Delete(ctx context.Context, request DeleteJob) error + + // Delete a job. + // + // Deletes a job. + DeleteByJobId(ctx context.Context, jobId int64) error + + // Delete a job run. + // + // Deletes a non-active run. Returns an error if the run is active. + DeleteRun(ctx context.Context, request DeleteRun) error + + // Delete a job run. + // + // Deletes a non-active run. Returns an error if the run is active. + DeleteRunByRunId(ctx context.Context, runId int64) error + + // Export and retrieve a job run. + // + // Export and retrieve the job run task. + ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) + + // Get a single job. + // + // Retrieves the details for a single job. + // + // In Jobs API 2.2, requests for a single job support pagination of `tasks` and + // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` + // field to check for more results and pass its value as the `page_token` in + // subsequent requests. Arrays with fewer than 100 elements in a page will be + // empty on later pages. + Get(ctx context.Context, request GetJobRequest) (*Job, error) + + // Get a single job. + // + // Retrieves the details for a single job. + // + // In Jobs API 2.2, requests for a single job support pagination of `tasks` and + // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` + // field to check for more results and pass its value as the `page_token` in + // subsequent requests. Arrays with fewer than 100 elements in a page will be + // empty on later pages. + GetByJobId(ctx context.Context, jobId int64) (*Job, error) + + // Get job permission levels. + // + // Gets the permission levels that a user can have on an object. 
+ GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) + + // Get job permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error) + + // Get job permissions. + // + // Gets the permissions of a job. Jobs can inherit permissions from their root + // object. + GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) + + // Get job permissions. + // + // Gets the permissions of a job. Jobs can inherit permissions from their root + // object. + GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error) + + // Get a single job run. + // + // Retrieves the metadata of a run. + // + // In Jobs API 2.2, requests for a single job run support pagination of `tasks` + // and `job_clusters` when either exceeds 100 elements. Use the + // `next_page_token` field to check for more results and pass its value as the + // `page_token` in subsequent requests. Arrays with fewer than 100 elements in a + // page will be empty on later pages. + GetRun(ctx context.Context, request GetRunRequest) (*Run, error) + + // Get the output for a single run. + // + // Retrieve the output and metadata of a single task run. When a notebook task + // returns a value through the `dbutils.notebook.exit()` call, you can use this + // endpoint to retrieve that value. Databricks restricts this API to returning + // the first 5 MB of the output. To return a larger result, you can store job + // results in a cloud storage service. + // + // This endpoint validates that the __run_id__ parameter is valid and returns an + // HTTP status code 400 if the __run_id__ parameter is invalid. Runs are + // automatically removed after 60 days. If you want to reference them beyond + // 60 days, you must save old run results before they expire. + GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) + + // Get the output for a single run. + // + // Retrieve the output and metadata of a single task run. When a notebook task + // returns a value through the `dbutils.notebook.exit()` call, you can use this + // endpoint to retrieve that value. Databricks restricts this API to returning + // the first 5 MB of the output. To return a larger result, you can store job + // results in a cloud storage service. + // + // This endpoint validates that the __run_id__ parameter is valid and returns an + // HTTP status code 400 if the __run_id__ parameter is invalid. Runs are + // automatically removed after 60 days. If you want to reference them beyond + // 60 days, you must save old run results before they expire. + GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error) + + // List jobs. + // + // Retrieves a list of jobs. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] + + // List jobs. + // + // Retrieves a list of jobs. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) + + // BaseJobSettingsNameToJobIdMap calls [JobsPreviewAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. + // + // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. 
+ // + // Note: All [BaseJob] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) + + // GetBySettingsName calls [JobsPreviewAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. + // + // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. + // + // Note: All [BaseJob] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetBySettingsName(ctx context.Context, name string) (*BaseJob, error) + + // List job runs. + // + // List runs in descending order by start time. + // + // This method is generated by Databricks SDK Code Generator. + ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] + + // List job runs. + // + // List runs in descending order by start time. + // + // This method is generated by Databricks SDK Code Generator. + ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) + + // Repair a job run. + // + // Re-run one or more tasks. Tasks are re-run as part of the original job run. + // They use the current job and task settings, and can be viewed in the history + // for the original job run. + RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) + + // Update all job settings (reset). + // + // Overwrite all settings for the given job. Use the [_Update_ + // endpoint](:method:jobs/update) to update job settings partially. + Reset(ctx context.Context, request ResetJob) error + + // Trigger a new job run. + // + // Run a job and return the `run_id` of the triggered run. + RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) + + // Set job permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) + + // Create and trigger a one-time run. + // + // Submit a one-time run. This endpoint allows you to submit a workload directly + // without creating a job. Runs submitted using this endpoint don’t display in + // the UI. Use the `jobs/runs/get` API to check the run state after the job is + // submitted. + Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) + + // Update job settings partially. + // + // Add, update, or remove specific settings of an existing job. Use the [_Reset_ + // endpoint](:method:jobs/reset) to overwrite all job settings. + Update(ctx context.Context, request UpdateJob) error + + // Update job permissions. + // + // Updates the permissions on a job. Jobs can inherit permissions from their + // root object. + UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) +} + +func NewJobsPreview(client *client.DatabricksClient) *JobsPreviewAPI { + return &JobsPreviewAPI{ + jobsPreviewImpl: jobsPreviewImpl{ + client: client, + }, + } +} + +// The Jobs API allows you to create, edit, and delete jobs. +// +// You can use a Databricks job to run a data processing or data analysis task +// in a Databricks cluster with scalable resources. Your job can consist of a +// single task or can be a large, multi-task workflow with complex dependencies. 
+// Databricks manages the task orchestration, cluster management, monitoring, +// and error reporting for all of your jobs. You can run your jobs immediately +// or periodically through an easy-to-use scheduling system. You can implement +// job tasks using notebooks, JARS, Delta Live Tables pipelines, or Python, +// Scala, Spark submit, and Java applications. +// +// You should never hard code secrets or store them in plain text. Use the +// [Secrets CLI] to manage secrets in the [Databricks CLI]. Use the [Secrets +// utility] to reference secrets in notebooks and jobs. +// +// [Databricks CLI]: https://docs.databricks.com/dev-tools/cli/index.html +// [Secrets CLI]: https://docs.databricks.com/dev-tools/cli/secrets-cli.html +// [Secrets utility]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets +type JobsPreviewAPI struct { + jobsPreviewImpl +} + +// Cancel a run. +// +// Cancels a job run or a task run. The run is canceled asynchronously, so it +// may still be running when this request completes. +func (a *JobsPreviewAPI) CancelRunByRunId(ctx context.Context, runId int64) error { + return a.jobsPreviewImpl.CancelRun(ctx, CancelRun{ + RunId: runId, + }) +} + +// Delete a job. +// +// Deletes a job. +func (a *JobsPreviewAPI) DeleteByJobId(ctx context.Context, jobId int64) error { + return a.jobsPreviewImpl.Delete(ctx, DeleteJob{ + JobId: jobId, + }) +} + +// Delete a job run. +// +// Deletes a non-active run. Returns an error if the run is active. +func (a *JobsPreviewAPI) DeleteRunByRunId(ctx context.Context, runId int64) error { + return a.jobsPreviewImpl.DeleteRun(ctx, DeleteRun{ + RunId: runId, + }) +} + +// Get a single job. +// +// Retrieves the details for a single job. +// +// In Jobs API 2.2, requests for a single job support pagination of `tasks` and +// `job_clusters` when either exceeds 100 elements. Use the `next_page_token` +// field to check for more results and pass its value as the `page_token` in +// subsequent requests. Arrays with fewer than 100 elements in a page will be +// empty on later pages. +func (a *JobsPreviewAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error) { + return a.jobsPreviewImpl.Get(ctx, GetJobRequest{ + JobId: jobId, + }) +} + +// Get job permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *JobsPreviewAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error) { + return a.jobsPreviewImpl.GetPermissionLevels(ctx, GetJobPermissionLevelsRequest{ + JobId: jobId, + }) +} + +// Get job permissions. +// +// Gets the permissions of a job. Jobs can inherit permissions from their root +// object. +func (a *JobsPreviewAPI) GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error) { + return a.jobsPreviewImpl.GetPermissions(ctx, GetJobPermissionsRequest{ + JobId: jobId, + }) +} + +// Get the output for a single run. +// +// Retrieve the output and metadata of a single task run. When a notebook task +// returns a value through the `dbutils.notebook.exit()` call, you can use this +// endpoint to retrieve that value. Databricks restricts this API to returning +// the first 5 MB of the output. To return a larger result, you can store job +// results in a cloud storage service. +// +// This endpoint validates that the __run_id__ parameter is valid and returns an +// HTTP status code 400 if the __run_id__ parameter is invalid. Runs are +// automatically removed after 60 days. 
If you want to reference them beyond +// 60 days, you must save old run results before they expire. +func (a *JobsPreviewAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error) { + return a.jobsPreviewImpl.GetRunOutput(ctx, GetRunOutputRequest{ + RunId: runId, + }) +} + +// BaseJobSettingsNameToJobIdMap calls [JobsPreviewAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. +// +// Returns an error if there's more than one [BaseJob] with the same .Settings.Name. +// +// Note: All [BaseJob] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *JobsPreviewAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]int64{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Settings.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Settings.Name: %s", key) + } + mapping[key] = v.JobId + } + return mapping, nil +} + +// GetBySettingsName calls [JobsPreviewAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. +// +// Returns an error if there's more than one [BaseJob] with the same .Settings.Name. +// +// Note: All [BaseJob] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *JobsPreviewAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListJobsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]BaseJob{} + for _, v := range result { + key := v.Settings.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("BaseJob named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of BaseJob named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type PolicyComplianceForJobsPreviewInterface interface { + + // Enforce job policy compliance. + // + // Updates a job so the job clusters that are created when running the job + // (specified in `new_cluster`) are compliant with the current versions of their + // respective cluster policies. All-purpose clusters used in the job will not be + // updated. + EnforceCompliance(ctx context.Context, request EnforcePolicyComplianceRequest) (*EnforcePolicyComplianceResponse, error) + + // Get job policy compliance. + // + // Returns the policy compliance status of a job. Jobs could be out of + // compliance if a cluster policy they use was updated after the job was last + // edited and some of its job clusters no longer comply with their updated + // policies. + GetCompliance(ctx context.Context, request GetPolicyComplianceRequest) (*GetPolicyComplianceResponse, error) + + // Get job policy compliance. + // + // Returns the policy compliance status of a job. Jobs could be out of + // compliance if a cluster policy they use was updated after the job was last + // edited and some of its job clusters no longer comply with their updated + // policies. 
+ GetComplianceByJobId(ctx context.Context, jobId int64) (*GetPolicyComplianceResponse, error) + + // List job policy compliance. + // + // Returns the policy compliance status of all jobs that use a given policy. + // Jobs could be out of compliance if a cluster policy they use was updated + // after the job was last edited and its job clusters no longer comply with the + // updated policy. + // + // This method is generated by Databricks SDK Code Generator. + ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] + + // List job policy compliance. + // + // Returns the policy compliance status of all jobs that use a given policy. + // Jobs could be out of compliance if a cluster policy they use was updated + // after the job was last edited and its job clusters no longer comply with the + // updated policy. + // + // This method is generated by Databricks SDK Code Generator. + ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) +} + +func NewPolicyComplianceForJobsPreview(client *client.DatabricksClient) *PolicyComplianceForJobsPreviewAPI { + return &PolicyComplianceForJobsPreviewAPI{ + policyComplianceForJobsPreviewImpl: policyComplianceForJobsPreviewImpl{ + client: client, + }, + } +} + +// The compliance APIs allow you to view and manage the policy compliance status +// of jobs in your workspace. This API currently only supports compliance +// controls for cluster policies. +// +// A job is in compliance if its cluster configurations satisfy the rules of all +// their respective cluster policies. A job could be out of compliance if a +// cluster policy it uses was updated after the job was last edited. The job is +// considered out of compliance if any of its clusters no longer comply with +// their updated policies. +// +// The get and list compliance APIs allow you to view the policy compliance +// status of a job. The enforce compliance API allows you to update a job so +// that it becomes compliant with all of its policies. +type PolicyComplianceForJobsPreviewAPI struct { + policyComplianceForJobsPreviewImpl +} + +// Get job policy compliance. +// +// Returns the policy compliance status of a job. Jobs could be out of +// compliance if a cluster policy they use was updated after the job was last +// edited and some of its job clusters no longer comply with their updated +// policies. +func (a *PolicyComplianceForJobsPreviewAPI) GetComplianceByJobId(ctx context.Context, jobId int64) (*GetPolicyComplianceResponse, error) { + return a.policyComplianceForJobsPreviewImpl.GetCompliance(ctx, GetPolicyComplianceRequest{ + JobId: jobId, + }) +} diff --git a/jobs/v2preview/client.go b/jobs/v2preview/client.go new file mode 100755 index 000000000..888215a5a --- /dev/null +++ b/jobs/v2preview/client.go @@ -0,0 +1,79 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
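As a consumer-side sketch of the name-based helpers defined in api.go above (`api` is an assumed `*jobspreview.JobsPreviewAPI`; the job name is illustrative, and `ctx`, `log`, and `fmt` are assumed in scope):

```go
// Look a job up by its settings name; this errors if the name is
// missing or shared by more than one job, per the docs above.
baseJob, err := api.GetBySettingsName(ctx, "nightly-etl")
if err != nil {
	log.Fatal(err)
}
// Then fetch the full definition through the by-ID convenience wrapper.
job, err := api.GetByJobId(ctx, baseJob.JobId)
if err != nil {
	log.Fatal(err)
}
fmt.Println(job.JobId)
```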
+ +package jobspreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type JobsPreviewClient struct { + JobsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewJobsPreviewClient(cfg *config.Config) (*JobsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &JobsPreviewClient{ + Config: cfg, + apiClient: apiClient, + JobsPreviewInterface: NewJobsPreview(databricksClient), + }, nil +} + +type PolicyComplianceForJobsPreviewClient struct { + PolicyComplianceForJobsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPolicyComplianceForJobsPreviewClient(cfg *config.Config) (*PolicyComplianceForJobsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PolicyComplianceForJobsPreviewClient{ + Config: cfg, + apiClient: apiClient, + PolicyComplianceForJobsPreviewInterface: NewPolicyComplianceForJobsPreview(databricksClient), + }, nil +} diff --git a/jobs/v2preview/impl.go b/jobs/v2preview/impl.go new file mode 100755 index 000000000..cd7558d33 --- /dev/null +++ b/jobs/v2preview/impl.go @@ -0,0 +1,374 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
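A minimal end-to-end sketch of the standalone client constructor in client.go above. It assumes credentials are resolvable from the environment (for example `DATABRICKS_HOST` and `DATABRICKS_TOKEN`), the import path is as inferred earlier, and the job ID is illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	jobspreview "github.com/databricks/databricks-sdk-go/jobs/v2preview" // assumed import path
)

func main() {
	// An empty config defers resolution to the environment;
	// NewJobsPreviewClient rejects account-level configurations.
	client, err := jobspreview.NewJobsPreviewClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// Any JobsPreviewInterface method is available on the client.
	if err := client.CancelAllRuns(context.Background(), jobspreview.CancelAllRuns{
		JobId: 123, // illustrative
	}); err != nil {
		log.Fatal(err)
	}
}
```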
+ +package jobspreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just JobsPreview API methods +type jobsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *jobsPreviewImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) error { + var cancelAllRunsResponse CancelAllRunsResponse + path := "/api/2.2preview/jobs/runs/cancel-all" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelAllRunsResponse) + return err +} + +func (a *jobsPreviewImpl) CancelRun(ctx context.Context, request CancelRun) error { + var cancelRunResponse CancelRunResponse + path := "/api/2.2preview/jobs/runs/cancel" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelRunResponse) + return err +} + +func (a *jobsPreviewImpl) Create(ctx context.Context, request CreateJob) (*CreateResponse, error) { + var createResponse CreateResponse + path := "/api/2.2preview/jobs/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) + return &createResponse, err +} + +func (a *jobsPreviewImpl) Delete(ctx context.Context, request DeleteJob) error { + var deleteResponse DeleteResponse + path := "/api/2.2preview/jobs/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *jobsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRun) error { + var deleteRunResponse DeleteRunResponse + path := "/api/2.2preview/jobs/runs/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunResponse) + return err +} + +func (a *jobsPreviewImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) { + var exportRunOutput ExportRunOutput + path := "/api/2.2preview/jobs/runs/export" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &exportRunOutput) + return &exportRunOutput, err +} + +func (a *jobsPreviewImpl) Get(ctx context.Context, request GetJobRequest) (*Job, error) { + var job Job + path := "/api/2.2preview/jobs/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &job) + return &job, err +} + +func (a *jobsPreviewImpl) 
GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) { + var getJobPermissionLevelsResponse GetJobPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v/permissionLevels", request.JobId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getJobPermissionLevelsResponse) + return &getJobPermissionLevelsResponse, err +} + +func (a *jobsPreviewImpl) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) { + var jobPermissions JobPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &jobPermissions) + return &jobPermissions, err +} + +func (a *jobsPreviewImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { + var run Run + path := "/api/2.2preview/jobs/runs/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &run) + return &run, err +} + +func (a *jobsPreviewImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) { + var runOutput RunOutput + path := "/api/2.2preview/jobs/runs/get-output" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &runOutput) + return &runOutput, err +} + +// List jobs. +// +// Retrieves a list of jobs. +func (a *jobsPreviewImpl) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { + + getNextPage := func(ctx context.Context, req ListJobsRequest) (*ListJobsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListJobsResponse) []BaseJob { + return resp.Jobs + } + getNextReq := func(resp *ListJobsResponse) *ListJobsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List jobs. +// +// Retrieves a list of jobs. +func (a *jobsPreviewImpl) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BaseJob](ctx, iterator) +} +func (a *jobsPreviewImpl) internalList(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { + var listJobsResponse ListJobsResponse + path := "/api/2.2preview/jobs/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listJobsResponse) + return &listJobsResponse, err +} + +// List job runs. +// +// List runs in descending order by start time. 
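The ListRuns implementation that follows uses the same page-token plumbing as List above. A hedged consumer-side sketch of the iterator contract, with `client`, `ctx`, and imports as in the earlier sketch and an illustrative job ID:

```go
// HasNext fetches pages lazily via the internal page-token loop;
// Next yields one BaseRun at a time.
it := client.ListRuns(ctx, jobspreview.ListRunsRequest{JobId: 123})
for it.HasNext(ctx) {
	run, err := it.Next(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(run.RunId)
}
```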
+func (a *jobsPreviewImpl) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { + + getNextPage := func(ctx context.Context, req ListRunsRequest) (*ListRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListRuns(ctx, req) + } + getItems := func(resp *ListRunsResponse) []BaseRun { + return resp.Runs + } + getNextReq := func(resp *ListRunsResponse) *ListRunsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List job runs. +// +// List runs in descending order by start time. +func (a *jobsPreviewImpl) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) { + iterator := a.ListRuns(ctx, request) + return listing.ToSlice[BaseRun](ctx, iterator) +} +func (a *jobsPreviewImpl) internalListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { + var listRunsResponse ListRunsResponse + path := "/api/2.2preview/jobs/runs/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRunsResponse) + return &listRunsResponse, err +} + +func (a *jobsPreviewImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) { + var repairRunResponse RepairRunResponse + path := "/api/2.2preview/jobs/runs/repair" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &repairRunResponse) + return &repairRunResponse, err +} + +func (a *jobsPreviewImpl) Reset(ctx context.Context, request ResetJob) error { + var resetResponse ResetResponse + path := "/api/2.2preview/jobs/reset" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &resetResponse) + return err +} + +func (a *jobsPreviewImpl) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) { + var runNowResponse RunNowResponse + path := "/api/2.2preview/jobs/run-now" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &runNowResponse) + return &runNowResponse, err +} + +func (a *jobsPreviewImpl) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { + var jobPermissions JobPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &jobPermissions) + return &jobPermissions, err +} + +func (a *jobsPreviewImpl) Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) { + var submitRunResponse SubmitRunResponse + path := "/api/2.2preview/jobs/runs/submit" + queryParams := make(map[string]any) + 
headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &submitRunResponse) + return &submitRunResponse, err +} + +func (a *jobsPreviewImpl) Update(ctx context.Context, request UpdateJob) error { + var updateResponse UpdateResponse + path := "/api/2.2preview/jobs/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateResponse) + return err +} + +func (a *jobsPreviewImpl) UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { + var jobPermissions JobPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &jobPermissions) + return &jobPermissions, err +} + +// unexported type that holds implementations of just PolicyComplianceForJobsPreview API methods +type policyComplianceForJobsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *policyComplianceForJobsPreviewImpl) EnforceCompliance(ctx context.Context, request EnforcePolicyComplianceRequest) (*EnforcePolicyComplianceResponse, error) { + var enforcePolicyComplianceResponse EnforcePolicyComplianceResponse + path := "/api/2.0preview/policies/jobs/enforce-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &enforcePolicyComplianceResponse) + return &enforcePolicyComplianceResponse, err +} + +func (a *policyComplianceForJobsPreviewImpl) GetCompliance(ctx context.Context, request GetPolicyComplianceRequest) (*GetPolicyComplianceResponse, error) { + var getPolicyComplianceResponse GetPolicyComplianceResponse + path := "/api/2.0preview/policies/jobs/get-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPolicyComplianceResponse) + return &getPolicyComplianceResponse, err +} + +// List job policy compliance. +// +// Returns the policy compliance status of all jobs that use a given policy. +// Jobs could be out of compliance if a cluster policy they use was updated +// after the job was last edited and its job clusters no longer comply with the +// updated policy. 
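A matching sketch for the compliance listing implemented below, using the eager ListComplianceAll variant (`pc` is an assumed `*jobspreview.PolicyComplianceForJobsPreviewAPI`; the policy ID is illustrative):

```go
all, err := pc.ListComplianceAll(ctx, jobspreview.ListJobComplianceRequest{
	PolicyId: "ABC123DEF456", // illustrative cluster policy ID
})
if err != nil {
	log.Fatal(err)
}
for _, jc := range all {
	if !jc.IsCompliant {
		fmt.Printf("job %d is out of compliance\n", jc.JobId)
	}
}
```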
+func (a *policyComplianceForJobsPreviewImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] { + + getNextPage := func(ctx context.Context, req ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCompliance(ctx, req) + } + getItems := func(resp *ListJobComplianceForPolicyResponse) []JobCompliance { + return resp.Jobs + } + getNextReq := func(resp *ListJobComplianceForPolicyResponse) *ListJobComplianceRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List job policy compliance. +// +// Returns the policy compliance status of all jobs that use a given policy. +// Jobs could be out of compliance if a cluster policy they use was updated +// after the job was last edited and its job clusters no longer comply with the +// updated policy. +func (a *policyComplianceForJobsPreviewImpl) ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) { + iterator := a.ListCompliance(ctx, request) + return listing.ToSlice[JobCompliance](ctx, iterator) +} +func (a *policyComplianceForJobsPreviewImpl) internalListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { + var listJobComplianceForPolicyResponse ListJobComplianceForPolicyResponse + path := "/api/2.0preview/policies/jobs/list-compliance" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listJobComplianceForPolicyResponse) + return &listJobComplianceForPolicyResponse, err +} diff --git a/jobs/v2preview/model.go b/jobs/v2preview/model.go new file mode 100755 index 000000000..8edd5ef1f --- /dev/null +++ b/jobs/v2preview/model.go @@ -0,0 +1,6086 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package jobspreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type Adlsgen2Info struct { + // abfss destination, e.g. + // `abfss://@.dfs.core.windows.net/`. + Destination string `json:"destination"` +} + +type AutoScale struct { + // The maximum number of workers to which the cluster can scale up when + // overloaded. Note that `max_workers` must be strictly greater than + // `min_workers`. + MaxWorkers int `json:"max_workers,omitempty"` + // The minimum number of workers to which the cluster can scale down when + // underutilized. It is also the initial number of workers the cluster will + // have after creation. + MinWorkers int `json:"min_workers,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AutoScale) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AutoScale) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AwsAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. + // + // Note: If `first_on_demand` is zero, this availability type will be used + // for the entire cluster. + Availability AwsAvailability `json:"availability,omitempty"` + // The number of volumes launched for each instance. Users can choose up to + // 10 volumes. 
This feature is only enabled for supported node types. Legacy + // node types cannot specify custom EBS volumes. For node types with no + // instance store, at least one EBS volume needs to be specified; otherwise, + // cluster creation will fail. + // + // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc. Instance + // store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc. + // + // If EBS volumes are attached, Databricks will configure Spark to use only + // the EBS volumes for scratch storage because heterogeneously sized scratch + // devices can lead to inefficient disk utilization. If no EBS volumes are + // attached, Databricks will configure Spark to use instance store volumes. + // + // Please note that if EBS volumes are specified, then the Spark + // configuration `spark.local.dir` will be overridden. + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + // If using gp3 volumes, what IOPS to use for the disk. If this is not set, + // the maximum performance of a gp2 volume with the same volume size will be + // used. + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` + // The size of each EBS volume (in GiB) launched for each instance. For + // general purpose SSD, this value must be within the range 100 - 4096. For + // throughput optimized HDD, this value must be within the range 500 - 4096. + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + // If using gp3 volumes, what throughput to use for the disk. If this is not + // set, the maximum performance of a gp2 volume with the same volume size + // will be used. + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` + // The type of EBS volumes that will be launched with this cluster. + EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. If this value is greater than 0, the cluster driver + // node in particular will be placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand int `json:"first_on_demand,omitempty"` + // Nodes for this cluster will only be placed on AWS instances with this + // instance profile. If omitted, nodes will be placed on instances without + // an IAM instance profile. The instance profile must have previously been + // added to the Databricks environment by an account administrator. + // + // This feature may only be available to certain customer plans. + // + // If this field is omitted, we will pull in the default from the conf if + // it exists. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The bid price for AWS spot instances, as a percentage of the + // corresponding instance type's on-demand price. For example, if this field + // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then + // the bid price is half of the price of on-demand `r3.xlarge` instances. + // Similarly, if this field is set to 200, the bid price is twice the price + // of on-demand `r3.xlarge` instances. If not specified, the default value + // is 100.
When spot instances are requested for this cluster, only spot + // instances whose bid price percentage matches this field will be + // considered. Note that, for safety, we enforce this field to be no more + // than 10000. + // + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidPricePercent and + // CommonConf.maxSpotBidPricePercent. + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + // Identifier for the availability zone/datacenter in which the cluster + // resides. This string will be of a form like "us-west-2a". The provided + // availability zone must be in the same region as the Databricks + // deployment. For example, "us-west-2a" is not a valid zone id if the + // Databricks deployment resides in the "us-east-1" region. This is an + // optional field at cluster creation, and if not specified, a default zone + // will be used. If the zone specified is "auto", Databricks will try to + // place the cluster in a zone with high availability, and will retry + // placement in a different AZ if there is not enough capacity. The list of + // available zones as well + // as the default value can be found by using the `List Zones` method. + ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AwsAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AwsAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. +// +// Note: If `first_on_demand` is zero, this availability type will be used for +// the entire cluster. +type AwsAvailability string + +const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND` + +const AwsAvailabilitySpot AwsAvailability = `SPOT` + +const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK` + +// String representation for [fmt.Print] +func (f *AwsAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AwsAvailability) Set(v string) error { + switch v { + case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`: + *f = AwsAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v) + } +} + +// Type always returns AwsAvailability to satisfy [pflag.Value] interface +func (f *AwsAvailability) Type() string { + return "AwsAvailability" +} + +type AzureAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only + // happens on pool clusters), this availability type will be used for the + // entire cluster. + Availability AzureAvailability `json:"availability,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. This value should be greater than 0, to make sure + // the cluster driver node is placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster.
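+	//
+	// A hedged configuration sketch (editorial, not generated): keeping the
+	// driver on an on-demand VM while bidding for the remaining workers on
+	// Azure spot capacity could look like
+	//
+	//	"azure_attributes": {
+	//		"availability": "SPOT_WITH_FALLBACK_AZURE",
+	//		"first_on_demand": 1,
+	//		"spot_bid_max_price": -1
+	//	}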
+ FirstOnDemand int `json:"first_on_demand,omitempty"` + // Defines values necessary to configure and run Azure Log Analytics agent + LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"` + // The max bid price to be used for Azure spot instances. The max price for + // the bid cannot be higher than the on-demand price of the instance. If not + // specified, the default value is -1, which specifies that the instance + // cannot be evicted on the basis of price, and only on the basis of + // availability. Further, the value should be > 0 or -1. + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. Note: If `first_on_demand` is zero (which only happens on pool +// clusters), this availability type will be used for the entire cluster. +type AzureAvailability string + +const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` + +const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE` + +const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE` + +// String representation for [fmt.Print] +func (f *AzureAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AzureAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`: + *f = AzureAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v) + } +} + +// Type always returns AzureAvailability to satisfy [pflag.Value] interface +func (f *AzureAvailability) Type() string { + return "AzureAvailability" +} + +type BaseJob struct { + // The time at which this job was created in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). + CreatedTime int64 `json:"created_time,omitempty"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName string `json:"creator_user_name,omitempty"` + // The id of the budget policy used by this job for cost attribution + // purposes. This may be set through (in order of precedence): 1. Budget + // admins through the account or workspace console 2. Jobs UI in the job + // details page and Jobs API using `budget_policy_id` 3. Inferred default + // based on accessible budget policies of the run_as identity on job + // creation or modification. + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` + // Indicates if the job has more sub-resources (`tasks`, `job_clusters`) + // that are not shown. They can be accessed via :method:jobs/get endpoint. + // It is only relevant for API 2.2 :method:jobs/list requests with + // `expand_tasks=true`. + HasMore bool `json:"has_more,omitempty"` + // The canonical identifier for this job. + JobId int64 `json:"job_id,omitempty"` + // Settings for this job and all of its runs. These settings can be updated + // using the `resetJob` method.
+ Settings *JobSettings `json:"settings,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BaseJob) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BaseJob) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type BaseRun struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` > 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber int `json:"attempt_number,omitempty"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration int64 `json:"cleanup_duration,omitempty"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. + ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` + // A snapshot of the job’s cluster specification when this run was + // created. + ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName string `json:"creator_user_name,omitempty"` + // Description of the run + Description string `json:"description,omitempty"` + // effective_performance_target is the actual performance target used by the + // run during execution. effective_performance_target can differ from + // performance_target depending on if the job was eligible to be + // cost-optimized (e.g. contains at least 1 serverless task) or if we + // specifically override the value for the run (ex. RunNow). + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` + // The time at which this run ended in epoch milliseconds (milliseconds + // since 1/1/1970 UTC). This field is set to 0 if the job is still running. + EndTime int64 `json:"end_time,omitempty"` + // The time in milliseconds it took to execute the commands in the JAR or + // notebook until they completed, failed, timed out, were cancelled, or + // encountered an unexpected error. The duration of a task run is the sum of + // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. + // The `execution_duration` field is set to 0 for multitask job runs. The + // total duration of a multitask job run is the value of the `run_duration` + // field. + ExecutionDuration int64 `json:"execution_duration,omitempty"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. 
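+	//
+	// A hedged sketch (editorial, not generated): a run whose tasks come from
+	// a Git branch would carry something like
+	//
+	//	"git_source": {
+	//		"git_url": "https://github.com/org/repo",
+	//		"git_provider": "gitHub",
+	//		"git_branch": "main"
+	//	}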
+ // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `json:"git_source,omitempty"` + // Indicates if the run has more sub-resources (`tasks`, `job_clusters`) + // that are not shown. They can be accessed via :method:jobs/getrun + // endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests + // with `expand_tasks=true`. + HasMore bool `json:"has_more,omitempty"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. If more than 100 + // job clusters are available, you can paginate through them using + // :method:jobs/getrun. + JobClusters []JobCluster `json:"job_clusters,omitempty"` + // The canonical identifier of the job that contains this run. + JobId int64 `json:"job_id,omitempty"` + // Job-level parameters used in the run + JobParameters []JobParameter `json:"job_parameters,omitempty"` + // ID of the job run that this run belongs to. For legacy and single-task + // job runs the field is populated with the job run ID. For task runs, the + // field is populated with the ID of the job run that the task run belongs + // to. + JobRunId int64 `json:"job_run_id,omitempty"` + // A unique identifier for this job run. This is set to the same value as + // `run_id`. + NumberInJob int64 `json:"number_in_job,omitempty"` + // If this run is a retry of a prior run attempt, this field contains the + // run_id of the original attempt; otherwise, it is the same as the run_id. + OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"` + // The parameters used for this run. + OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"` + // The time in milliseconds that the run has spent in the queue. + QueueDuration int64 `json:"queue_duration,omitempty"` + // The repair history of the run. + RepairHistory []RepairHistoryItem `json:"repair_history,omitempty"` + // The time in milliseconds it took the job run and all of its repairs to + // finish. + RunDuration int64 `json:"run_duration,omitempty"` + // The canonical identifier of the run. This ID is unique across all runs of + // all jobs. + RunId int64 `json:"run_id,omitempty"` + // An optional name for the run. The maximum length is 4096 bytes in UTF-8 + // encoding. + RunName string `json:"run_name,omitempty"` + // The URL to the detail page of the run. + RunPageUrl string `json:"run_page_url,omitempty"` + // The type of a run. * `JOB_RUN`: Normal job run. A run created with + // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with + // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with + // :method:jobs/submit. + // + // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow + RunType RunType `json:"run_type,omitempty"` + // The cron schedule that triggered this run if it was triggered by the + // periodic scheduler. + Schedule *CronSchedule `json:"schedule,omitempty"` + // The time in milliseconds it took to set up the cluster. For runs that run + // on new clusters this is the cluster creation time, for runs that run on + // existing clusters this time should be very short. The duration of a task + // run is the sum of the `setup_duration`, `execution_duration`, and the + // `cleanup_duration`. 
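+	// For example (illustrative): a task run with setup_duration=120000,
+	// execution_duration=300000 and cleanup_duration=15000 lasted 435000 ms.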
The `setup_duration` field is set to 0 for multitask + // job runs. The total duration of a multitask job run is the value of the + // `run_duration` field. + SetupDuration int64 `json:"setup_duration,omitempty"` + // The time at which this run was started in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). This may not be the time when the job + // task starts executing, for example, if the job is scheduled to run on a + // new cluster, this is the time the cluster creation call is issued. + StartTime int64 `json:"start_time,omitempty"` + // Deprecated. Please use the `status` field instead. + State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` + // The list of tasks performed by the run. Each task has its own `run_id` + // which you can use to call `JobsGetOutput` to retrieve the run results. If + // more than 100 tasks are available, you can paginate through them using + // :method:jobs/getrun. Use the `next_page_token` field at the object root + // to determine if more results are available. + Tasks []RunTask `json:"tasks,omitempty"` + // The type of trigger that fired this run. + // + // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron + // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This + // occurs when you trigger a single run on demand through the UI or the API. * + // `RETRY`: Indicates a run that is triggered as a retry of a previously + // failed run. This occurs when you request to re-run the job in case of + // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run + // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file + // arrival. * `TABLE`: Indicates a run that is triggered by a table update. + // * `CONTINUOUS_RESTART`: Indicates a run created by the user to manually + // restart a continuous job run. + Trigger TriggerType `json:"trigger,omitempty"` + // Additional details about what triggered the run + TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BaseRun) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BaseRun) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CancelAllRuns struct { + // Optional boolean parameter to cancel all queued runs. If no job_id is + // provided, all queued runs in the workspace are canceled. + AllQueuedRuns bool `json:"all_queued_runs,omitempty"` + // The canonical identifier of the job to cancel all runs of. + JobId int64 `json:"job_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CancelAllRuns) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CancelAllRuns) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CancelAllRunsResponse struct { +} + +type CancelRun struct { + // This field is required. + RunId int64 `json:"run_id"` +} + +type CancelRunResponse struct { +} + +// Copied from elastic-spark-common/api/messages/runs.proto.
Using the original +// definition to remove coupling with jobs API definition +type CleanRoomTaskRunLifeCycleState string + +const CleanRoomTaskRunLifeCycleStateBlocked CleanRoomTaskRunLifeCycleState = `BLOCKED` + +const CleanRoomTaskRunLifeCycleStateInternalError CleanRoomTaskRunLifeCycleState = `INTERNAL_ERROR` + +const CleanRoomTaskRunLifeCycleStatePending CleanRoomTaskRunLifeCycleState = `PENDING` + +const CleanRoomTaskRunLifeCycleStateQueued CleanRoomTaskRunLifeCycleState = `QUEUED` + +const CleanRoomTaskRunLifeCycleStateRunning CleanRoomTaskRunLifeCycleState = `RUNNING` + +const CleanRoomTaskRunLifeCycleStateRunLifeCycleStateUnspecified CleanRoomTaskRunLifeCycleState = `RUN_LIFE_CYCLE_STATE_UNSPECIFIED` + +const CleanRoomTaskRunLifeCycleStateSkipped CleanRoomTaskRunLifeCycleState = `SKIPPED` + +const CleanRoomTaskRunLifeCycleStateTerminated CleanRoomTaskRunLifeCycleState = `TERMINATED` + +const CleanRoomTaskRunLifeCycleStateTerminating CleanRoomTaskRunLifeCycleState = `TERMINATING` + +const CleanRoomTaskRunLifeCycleStateWaitingForRetry CleanRoomTaskRunLifeCycleState = `WAITING_FOR_RETRY` + +// String representation for [fmt.Print] +func (f *CleanRoomTaskRunLifeCycleState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomTaskRunLifeCycleState) Set(v string) error { + switch v { + case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `RUN_LIFE_CYCLE_STATE_UNSPECIFIED`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: + *f = CleanRoomTaskRunLifeCycleState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "RUN_LIFE_CYCLE_STATE_UNSPECIFIED", "SKIPPED", "TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) + } +} + +// Type always returns CleanRoomTaskRunLifeCycleState to satisfy [pflag.Value] interface +func (f *CleanRoomTaskRunLifeCycleState) Type() string { + return "CleanRoomTaskRunLifeCycleState" +} + +// Copied from elastic-spark-common/api/messages/runs.proto. Using the original +// definition to avoid cyclic dependency. 
+type CleanRoomTaskRunResultState string + +const CleanRoomTaskRunResultStateCanceled CleanRoomTaskRunResultState = `CANCELED` + +const CleanRoomTaskRunResultStateDisabled CleanRoomTaskRunResultState = `DISABLED` + +const CleanRoomTaskRunResultStateEvicted CleanRoomTaskRunResultState = `EVICTED` + +const CleanRoomTaskRunResultStateExcluded CleanRoomTaskRunResultState = `EXCLUDED` + +const CleanRoomTaskRunResultStateFailed CleanRoomTaskRunResultState = `FAILED` + +const CleanRoomTaskRunResultStateMaximumConcurrentRunsReached CleanRoomTaskRunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED` + +const CleanRoomTaskRunResultStateRunResultStateUnspecified CleanRoomTaskRunResultState = `RUN_RESULT_STATE_UNSPECIFIED` + +const CleanRoomTaskRunResultStateSuccess CleanRoomTaskRunResultState = `SUCCESS` + +const CleanRoomTaskRunResultStateSuccessWithFailures CleanRoomTaskRunResultState = `SUCCESS_WITH_FAILURES` + +const CleanRoomTaskRunResultStateTimedout CleanRoomTaskRunResultState = `TIMEDOUT` + +const CleanRoomTaskRunResultStateUpstreamCanceled CleanRoomTaskRunResultState = `UPSTREAM_CANCELED` + +const CleanRoomTaskRunResultStateUpstreamEvicted CleanRoomTaskRunResultState = `UPSTREAM_EVICTED` + +const CleanRoomTaskRunResultStateUpstreamFailed CleanRoomTaskRunResultState = `UPSTREAM_FAILED` + +// String representation for [fmt.Print] +func (f *CleanRoomTaskRunResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomTaskRunResultState) Set(v string) error { + switch v { + case `CANCELED`, `DISABLED`, `EVICTED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `RUN_RESULT_STATE_UNSPECIFIED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_EVICTED`, `UPSTREAM_FAILED`: + *f = CleanRoomTaskRunResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EVICTED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "RUN_RESULT_STATE_UNSPECIFIED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_EVICTED", "UPSTREAM_FAILED"`, v) + } +} + +// Type always returns CleanRoomTaskRunResultState to satisfy [pflag.Value] interface +func (f *CleanRoomTaskRunResultState) Type() string { + return "CleanRoomTaskRunResultState" +} + +// Stores the run state of the clean rooms notebook task. +type CleanRoomTaskRunState struct { + // A value indicating the run's current lifecycle state. This field is + // always available in the response. + LifeCycleState CleanRoomTaskRunLifeCycleState `json:"life_cycle_state,omitempty"` + // A value indicating the run's result. This field is only available for + // terminal lifecycle states. + ResultState CleanRoomTaskRunResultState `json:"result_state,omitempty"` +} + +type CleanRoomsNotebookTask struct { + // The clean room that the notebook belongs to. + CleanRoomName string `json:"clean_room_name"` + // Checksum to validate the freshness of the notebook resource (i.e. the + // notebook being run is the latest version). It can be fetched by calling + // the :method:cleanroomassets/get API. + Etag string `json:"etag,omitempty"` + // Base parameters to be used for the clean room notebook job. + NotebookBaseParameters map[string]string `json:"notebook_base_parameters,omitempty"` + // Name of the notebook being run. 
+ NotebookName string `json:"notebook_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomsNotebookTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomsNotebookTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput struct { + // The run state of the clean rooms notebook task. + CleanRoomJobRunState *CleanRoomTaskRunState `json:"clean_room_job_run_state,omitempty"` + // The notebook output for the clean room run + NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` + // Information on how to access the output schema for the clean room run + OutputSchemaInfo *OutputSchemaInfo `json:"output_schema_info,omitempty"` +} + +type ClientsTypes struct { + // With jobs set, the cluster can be used for jobs + Jobs bool `json:"jobs,omitempty"` + // With notebooks set, this cluster can be used for notebooks + Notebooks bool `json:"notebooks,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClientsTypes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClientsTypes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterInstance struct { + // The canonical identifier for the cluster used by a run. This field is + // always available for runs on existing clusters. For runs on new clusters, + // it becomes available once the cluster is created. This value can be used + // to view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`. + // The logs continue to be available after the run completes. + // + // The response won’t include this field if the identifier is not + // available yet. + ClusterId string `json:"cluster_id,omitempty"` + // The canonical identifier for the Spark context used by a run. This field + // is filled in once the run begins execution. This value can be used to + // view the Spark UI by browsing to + // `/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues + // to be available after the run has completed. + // + // The response won’t include this field if the identifier is not + // available yet. + SparkContextId string `json:"spark_context_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterInstance) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterInstance) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterLogConf struct { + // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` The cluster IAM role is used to access S3; please make sure + // the cluster IAM role in `instance_profile_arn` has permission to write + // data to the S3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/catalog/schema/volume/cluster_log" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` +} + +type ClusterSpec struct { + // If existing_cluster_id, the ID of an existing cluster that is used for + // all runs. When running jobs or tasks on an existing cluster, you may need + // to manually restart the cluster if it stops responding.
We suggest + // running jobs and tasks on new clusters for greater reliability. + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + // If job_cluster_key, this task is executed reusing the cluster specified + // in `job.settings.job_clusters`. + JobClusterKey string `json:"job_cluster_key,omitempty"` + // An optional list of libraries to be installed on the cluster. The default + // value is an empty list. + Libraries []Library `json:"libraries,omitempty"` + // If new_cluster, a description of a new cluster that is created for each + // run. + NewCluster *ClusterSpec `json:"new_cluster,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Condition string + +const ConditionAllUpdated Condition = `ALL_UPDATED` + +const ConditionAnyUpdated Condition = `ANY_UPDATED` + +// String representation for [fmt.Print] +func (f *Condition) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Condition) Set(v string) error { + switch v { + case `ALL_UPDATED`, `ANY_UPDATED`: + *f = Condition(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL_UPDATED", "ANY_UPDATED"`, v) + } +} + +// Type always returns Condition to satisfy [pflag.Value] interface +func (f *Condition) Type() string { + return "Condition" +} + +type ConditionTask struct { + // The left operand of the condition task. Can be either a string value or a + // job state or parameter reference. + Left string `json:"left"` + // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their + // operands. This means that `“12.0” == “12”` will evaluate to + // `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, + // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their + // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” + // >= “12”` will evaluate to `false`. + // + // The boolean comparison to task values can be implemented with operators + // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it + // will be serialized to `“true”` or `“false”` for the comparison. + Op ConditionTaskOp `json:"op"` + // The right operand of the condition task. Can be either a string value or + // a job state or parameter reference. + Right string `json:"right"` +} + +// * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their +// operands. This means that `“12.0” == “12”` will evaluate to `false`. +// * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` +// operators perform numeric comparison of their operands. `“12.0” >= +// “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to +// `false`. +// +// The boolean comparison to task values can be implemented with operators +// `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will +// be serialized to `“true”` or `“false”` for the comparison.
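+//
+// A hedged usage sketch (editorial, not generated): like the other enum types
+// in this package, ConditionTaskOp implements [pflag.Value], so raw strings
+// can be validated before use:
+//
+//	var op ConditionTaskOp
+//	if err := op.Set("GREATER_THAN"); err != nil {
+//		// only reached for values outside the allowed set
+//	}
+//	_ = op.String() // "GREATER_THAN"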
+type ConditionTaskOp string + +const ConditionTaskOpEqualTo ConditionTaskOp = `EQUAL_TO` + +const ConditionTaskOpGreaterThan ConditionTaskOp = `GREATER_THAN` + +const ConditionTaskOpGreaterThanOrEqual ConditionTaskOp = `GREATER_THAN_OR_EQUAL` + +const ConditionTaskOpLessThan ConditionTaskOp = `LESS_THAN` + +const ConditionTaskOpLessThanOrEqual ConditionTaskOp = `LESS_THAN_OR_EQUAL` + +const ConditionTaskOpNotEqual ConditionTaskOp = `NOT_EQUAL` + +// String representation for [fmt.Print] +func (f *ConditionTaskOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ConditionTaskOp) Set(v string) error { + switch v { + case `EQUAL_TO`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL`: + *f = ConditionTaskOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL_TO", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"`, v) + } +} + +// Type always returns ConditionTaskOp to satisfy [pflag.Value] interface +func (f *ConditionTaskOp) Type() string { + return "ConditionTaskOp" +} + +type Continuous struct { + // Indicate whether the continuous execution of the job is paused or not. + // Defaults to UNPAUSED. + PauseStatus PauseStatus `json:"pause_status,omitempty"` +} + +type CreateJob struct { + // List of permissions to set on the job. + AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"` + // The id of the user specified budget policy to use for this job. If not + // specified, a default budget policy may be applied when creating or + // modifying the job. See `effective_budget_policy_id` for the budget policy + // used by this workload. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // An optional continuous property for this job. The continuous property + // will ensure that there is always one run executing. Only one of + // `schedule` and `continuous` can be used. + Continuous *Continuous `json:"continuous,omitempty"` + // Deployment information for jobs managed by external sources. + Deployment *JobDeployment `json:"deployment,omitempty"` + // An optional description for the job. The maximum length is 27700 + // characters in UTF-8 encoding. + Description string `json:"description,omitempty"` + // Edit mode of the job. + // + // * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * + // `EDITABLE`: The job is in an editable state and can be modified. + EditMode JobEditMode `json:"edit_mode,omitempty"` + // An optional set of email addresses that is notified when runs of this job + // begin or complete as well as when this job is deleted. + EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` + // A list of task execution environment specifications that can be + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. + Environments []JobEnvironment `json:"environments,omitempty"` + // Used to tell what is the format of the job. This field is ignored in + // Create/Update/Reset calls. When using the Jobs API 2.1 this value is + // always set to `"MULTI_TASK"`. 
+ Format Format `json:"format,omitempty"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `json:"git_source,omitempty"` + // An optional set of health rules that can be defined for this job. + Health *JobsHealthRules `json:"health,omitempty"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. If more than 100 + // job clusters are available, you can paginate through them using + // :method:jobs/get. + JobClusters []JobCluster `json:"job_clusters,omitempty"` + // An optional maximum allowed number of concurrent runs of the job. Set + // this value if you want to be able to execute multiple runs of the same + // job concurrently. This is useful for example if you trigger your job on a + // frequent schedule and want to allow consecutive runs to overlap with each + // other, or if you want to trigger multiple runs which differ by their + // input parameters. This setting affects only new runs. For example, + // suppose the job’s concurrency is 4 and there are 4 concurrent active + // runs. Then setting the concurrency to 3 won’t kill any of the active + // runs. However, from then on, new runs are skipped unless there are fewer + // than 3 active runs. This value cannot exceed 1000. Setting this value to + // `0` causes all new runs to be skipped. + MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"` + // An optional name for the job. The maximum length is 4096 bytes in UTF-8 + // encoding. + Name string `json:"name,omitempty"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // job. + NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` + // Job-level parameter definitions + Parameters []JobParameterDefinition `json:"parameters,omitempty"` + // PerformanceTarget defines how performant or cost efficient the execution + // of a run on serverless should be. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` + // The queue settings of the job. + Queue *QueueSettings `json:"queue,omitempty"` + // Write-only setting. Specifies the user or service principal that the job + // runs as. If not specified, the job runs as the user who created the job. + // + // Either `user_name` or `service_principal_name` should be specified. If + // not, an error is thrown. + RunAs *JobRunAs `json:"run_as,omitempty"` + // An optional periodic schedule for this job. The default behavior is that + // the job only runs when triggered by clicking “Run Now” in the Jobs UI + // or sending an API request to `runNow`. + Schedule *CronSchedule `json:"schedule,omitempty"` + // A map of tags associated with the job. These are forwarded to the cluster + // as cluster tags for jobs clusters, and are subject to the same + // limitations as cluster tags.
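+	// For example (illustrative): "tags": {"cost-center": "engineering",
+	// "team": "jobs"}.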
A maximum of 25 tags can be added to the + // job. + Tags map[string]string `json:"tags,omitempty"` + // A list of task specifications to be executed by this job. If more than + // 100 tasks are available, you can paginate through them using + // :method:jobs/get. Use the `next_page_token` field at the object root to + // determine if more results are available. + Tasks []Task `json:"tasks,omitempty"` + // An optional timeout applied to each run of this job. A value of `0` means + // no timeout. + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + // A configuration to trigger a run when certain conditions are met. The + // default behavior is that the job runs only when triggered by clicking + // “Run Now” in the Jobs UI or sending an API request to `runNow`. + Trigger *TriggerSettings `json:"trigger,omitempty"` + // A collection of system notification IDs to notify when runs of this job + // begin or complete. + WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateJob) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateJob) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Job was created successfully +type CreateResponse struct { + // The canonical identifier for the newly created job. + JobId int64 `json:"job_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CronSchedule struct { + // Indicate whether this schedule is paused or not. + PauseStatus PauseStatus `json:"pause_status,omitempty"` + // A Cron expression using Quartz syntax that describes the schedule for a + // job. See [Cron Trigger] for details. This field is required. + // + // [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression string `json:"quartz_cron_expression"` + // A Java timezone ID. The schedule for a job is resolved with respect to + // this timezone. See [Java TimeZone] for details. This field is required. + // + // [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html + TimezoneId string `json:"timezone_id"` +} + +// Data security mode decides what data governance model to use when accessing +// data from a cluster. +// +// The following modes can only be used with `kind`. * +// `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access +// mode depending on your compute configuration. * +// `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * +// `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`. +// +// The following modes can be used regardless of `kind`. * `NONE`: No security +// isolation for multiple users sharing the cluster. Data governance features +// are not available in this mode. * `SINGLE_USER`: A secure cluster that can +// only be exclusively used by a single user specified in `single_user_name`. +// Most programming languages, cluster features and data governance features are +// available in this mode. * `USER_ISOLATION`: A secure cluster that can be +// shared by multiple users. Cluster users are fully isolated so that they +// cannot see each other's data and credentials. Most data governance features +// are supported in this mode. But programming languages and cluster features +// might be limited. 
+// +// The following modes are deprecated starting with Databricks Runtime 15.0 and +// will be removed for future Databricks Runtime versions: +// +// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL +// clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from +// legacy Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This +// mode is for users migrating from legacy Passthrough on standard clusters. * +// `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have +// UC or passthrough enabled. +type DataSecurityMode string + +// Databricks will choose the most appropriate access mode depending on your +// compute configuration. +const DataSecurityModeDataSecurityModeAuto DataSecurityMode = `DATA_SECURITY_MODE_AUTO` + +// Alias for `SINGLE_USER`. +const DataSecurityModeDataSecurityModeDedicated DataSecurityMode = `DATA_SECURITY_MODE_DEDICATED` + +// Alias for `USER_ISOLATION`. +const DataSecurityModeDataSecurityModeStandard DataSecurityMode = `DATA_SECURITY_MODE_STANDARD` + +// This mode is for users migrating from legacy Passthrough on high concurrency +// clusters. +const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH` + +// This mode is for users migrating from legacy Passthrough on standard +// clusters. +const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER` + +// This mode provides a way that doesn’t have UC or passthrough enabled. +const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD` + +// This mode is for users migrating from legacy Table ACL clusters. +const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL` + +// No security isolation for multiple users sharing the cluster. Data governance +// features are not available in this mode. +const DataSecurityModeNone DataSecurityMode = `NONE` + +// A secure cluster that can only be exclusively used by a single user specified +// in `single_user_name`. Most programming languages, cluster features and data +// governance features are available in this mode. +const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER` + +// A secure cluster that can be shared by multiple users. Cluster users are +// fully isolated so that they cannot see each other's data and credentials. +// Most data governance features are supported in this mode. But programming +// languages and cluster features might be limited.
+const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION` + +// String representation for [fmt.Print] +func (f *DataSecurityMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataSecurityMode) Set(v string) error { + switch v { + case `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, `DATA_SECURITY_MODE_STANDARD`, `LEGACY_PASSTHROUGH`, `LEGACY_SINGLE_USER`, `LEGACY_SINGLE_USER_STANDARD`, `LEGACY_TABLE_ACL`, `NONE`, `SINGLE_USER`, `USER_ISOLATION`: + *f = DataSecurityMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_SECURITY_MODE_AUTO", "DATA_SECURITY_MODE_DEDICATED", "DATA_SECURITY_MODE_STANDARD", "LEGACY_PASSTHROUGH", "LEGACY_SINGLE_USER", "LEGACY_SINGLE_USER_STANDARD", "LEGACY_TABLE_ACL", "NONE", "SINGLE_USER", "USER_ISOLATION"`, v) + } +} + +// Type always returns DataSecurityMode to satisfy [pflag.Value] interface +func (f *DataSecurityMode) Type() string { + return "DataSecurityMode" +} + +type DbfsStorageInfo struct { + // dbfs destination, e.g. `dbfs:/my/path` + Destination string `json:"destination"` +} + +type DbtOutput struct { + // An optional map of headers to send when retrieving the artifact from the + // `artifacts_link`. + ArtifactsHeaders map[string]string `json:"artifacts_headers,omitempty"` + // A pre-signed URL to download the (compressed) dbt artifacts. This link is + // valid for a limited time (30 minutes). This information is only available + // after the run has finished. + ArtifactsLink string `json:"artifacts_link,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DbtOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DbtOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DbtTask struct { + // Optional name of the catalog to use. The value is the top level in the + // 3-level namespace of Unity Catalog (catalog / schema / relation). The + // catalog value can only be specified if a warehouse_id is specified. + // Requires dbt-databricks >= 1.1.1. + Catalog string `json:"catalog,omitempty"` + // A list of dbt commands to execute. All commands must start with `dbt`. + // This parameter must not be empty. A maximum of 10 commands can be + // provided. + Commands []string `json:"commands"` + // Optional (relative) path to the profiles directory. Can only be specified + // if no warehouse_id is specified. If no warehouse_id is specified and this + // folder is unset, the root directory is used. + ProfilesDirectory string `json:"profiles_directory,omitempty"` + // Path to the project directory. Optional for Git sourced tasks, in which + // case if no value is provided, the root of the Git repository is used. + ProjectDirectory string `json:"project_directory,omitempty"` + // Optional schema to write to. This parameter is only used when a + // warehouse_id is also provided. If not provided, the `default` schema is + // used. + Schema string `json:"schema,omitempty"` + // Optional location type of the project directory. When set to `WORKSPACE`, + // the project will be retrieved from the local Databricks workspace. When + // set to `GIT`, the project will be retrieved from a Git repository defined + // in `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. + // + // * `WORKSPACE`: Project is located in Databricks workspace. * `GIT`: + // Project is located in cloud Git provider.
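+	//
+	// A hedged configuration sketch (editorial, not generated): a
+	// warehouse-backed dbt task might look like
+	//
+	//	"dbt_task": {
+	//		"commands": ["dbt deps", "dbt run"],
+	//		"warehouse_id": "<warehouse id>",
+	//		"catalog": "main",
+	//		"schema": "analytics"
+	//	}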
+ Source Source `json:"source,omitempty"` + // ID of the SQL warehouse to connect to. If provided, we automatically + // generate and provide the profile and connection details to dbt. It can be + // overridden on a per-command basis by using the `--profiles-dir` command + // line argument. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DbtTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DbtTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DeleteJob struct { + // The canonical identifier of the job to delete. This field is required. + JobId int64 `json:"job_id"` +} + +type DeleteResponse struct { +} + +type DeleteRun struct { + // ID of the run to delete. + RunId int64 `json:"run_id"` +} + +type DeleteRunResponse struct { +} + +type DockerBasicAuth struct { + // Password of the user + Password string `json:"password,omitempty"` + // Name of the user + Username string `json:"username,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerBasicAuth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DockerImage struct { + BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` + // URL of the docker image. + Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DockerImage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DockerImage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of EBS volumes that will be launched with this cluster. +type EbsVolumeType string + +const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD` + +const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *EbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = EbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns EbsVolumeType to satisfy [pflag.Value] interface +func (f *EbsVolumeType) Type() string { + return "EbsVolumeType" +} + +// Represents a change to the job cluster's settings that would be required for +// the job clusters to become compliant with their policies. +type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct { + // The field where this change would be made, prepended with the job cluster + // key. + Field string `json:"field,omitempty"` + // The new value of this field after enforcing policy compliance (either a + // number, a boolean, or a string) converted to a string. This is intended + // to be read by a human. The typed new value of this field can be retrieved + // by reading the settings field in the API response. + NewValue string `json:"new_value,omitempty"` + // The previous value of this field before enforcing policy compliance + // (either a number, a boolean, or a string) converted to a string. This is + // intended to be read by a human. The type of the field can be retrieved by + // reading the settings field in the API response. 
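+	// For example (illustrative): field
+	// "job_cluster_main.new_cluster.autoscale.max_workers" with
+	// previous_value "100" and new_value "20".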
+ PreviousValue string `json:"previous_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnforcePolicyComplianceRequest struct { + // The ID of the job you want to enforce policy compliance on. + JobId int64 `json:"job_id"` + // If set, previews changes made to the job to comply with its policy, but + // does not update the job. + ValidateOnly bool `json:"validate_only,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnforcePolicyComplianceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnforcePolicyComplianceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnforcePolicyComplianceResponse struct { + // Whether any changes have been made to the job cluster settings for the + // job to become compliant with its policies. + HasChanges bool `json:"has_changes,omitempty"` + // A list of job cluster changes that have been made to the job’s cluster + // settings in order for all job clusters to become compliant with their + // policies. + JobClusterChanges []EnforcePolicyComplianceForJobResponseJobClusterSettingsChange `json:"job_cluster_changes,omitempty"` + // Updated job settings after policy enforcement. Policy enforcement only + // applies to job clusters that are created when running the job (which are + // specified in new_cluster) and does not apply to existing all-purpose + // clusters. Updated job settings are derived by applying policy default + // values to the existing job clusters in order to satisfy policy + // requirements. + Settings *JobSettings `json:"settings,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnforcePolicyComplianceResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnforcePolicyComplianceResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The environment entity used to preserve serverless environment side panel and +// jobs' environment for non-notebook task. In this minimal environment spec, +// only pip dependencies are supported. +type Environment struct { + // Client version used by the environment. The client is the user-facing + // environment of the runtime. Each client comes with a specific set of + // pre-installed libraries. The version is a string, consisting of the major + // client version. + Client string `json:"client"` + // List of pip dependencies, as supported by the version of pip in this + // environment. Each dependency is a pip requirement file line + // https://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed + // dependency could be <requirement specifier>, <archive url/path>, + // <local project path> (WSFS or Volumes in Databricks), e.g. + // dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] + Dependencies []string `json:"dependencies,omitempty"` +} + +// Run was exported successfully. +type ExportRunOutput struct { + // The exported content in HTML format (one for every view item). To extract + // the HTML notebook from the JSON response, download and run this [Python + // script].
+ // + // [Python script]: https://docs.databricks.com/en/_static/examples/extract.py + Views []ViewItem `json:"views,omitempty"` +} + +// Export and retrieve a job run +type ExportRunRequest struct { + // The canonical identifier for the run. This field is required. + RunId int64 `json:"-" url:"run_id"` + // Which views to export (CODE, DASHBOARDS, or ALL). Defaults to CODE. + ViewsToExport ViewsToExport `json:"-" url:"views_to_export,omitempty"` +} + +type FileArrivalTriggerConfiguration struct { + // If set, the trigger starts a run only after the specified amount of time + // passed since the last time the trigger fired. The minimum allowed value + // is 60 seconds + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + // URL to be monitored for file arrivals. The path must point to the root or + // a subpath of the external location. + Url string `json:"url"` + // If set, the trigger starts a run only after no file activity has occurred + // for the specified amount of time. This makes it possible to wait for a + // batch of incoming files to arrive before triggering a run. The minimum + // allowed value is 60 seconds. + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FileArrivalTriggerConfiguration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FileArrivalTriggerConfiguration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ForEachStats struct { + // Sample of the 3 most common error messages that occurred during the iteration. + ErrorMessageStats []ForEachTaskErrorMessageStats `json:"error_message_stats,omitempty"` + // Describes stats of the iteration. Only latest retries are considered. + TaskRunStats *ForEachTaskTaskRunStats `json:"task_run_stats,omitempty"` +} + +type ForEachTask struct { + // An optional maximum allowed number of concurrent runs of the task. Set + // this value if you want to be able to execute multiple runs of the task + // concurrently. + Concurrency int `json:"concurrency,omitempty"` + // Array for task to iterate on. This can be a JSON string or a reference to + // an array parameter. + Inputs string `json:"inputs"` + // Configuration for the task that will be run for each element in the array + Task Task `json:"task"` + + ForceSendFields []string `json:"-"` +} + +func (s *ForEachTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ForEachTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ForEachTaskErrorMessageStats struct { + // Describes the count of such error messages encountered during the + // iterations. + Count int `json:"count,omitempty"` + // Describes the error message that occurred during the iterations. + ErrorMessage string `json:"error_message,omitempty"` + // Describes the termination reason for the error message. + TerminationCategory string `json:"termination_category,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ForEachTaskErrorMessageStats) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ForEachTaskErrorMessageStats) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ForEachTaskTaskRunStats struct { + // Describes the iteration runs having an active lifecycle state or an + // active run sub state. + ActiveIterations int `json:"active_iterations,omitempty"` + // Describes the number of failed and succeeded iteration runs.
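+	// For example (illustrative): failed_iterations=2 and
+	// succeeded_iterations=8 give completed_iterations=10.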
+type ForEachTaskTaskRunStats struct {
+	// Describes the iteration runs having an active lifecycle state or an
+	// active run sub state.
+	ActiveIterations int `json:"active_iterations,omitempty"`
+	// Describes the number of failed and succeeded iteration runs.
+	CompletedIterations int `json:"completed_iterations,omitempty"`
+	// Describes the number of failed iteration runs.
+	FailedIterations int `json:"failed_iterations,omitempty"`
+	// Describes the number of iteration runs that have been scheduled.
+	ScheduledIterations int `json:"scheduled_iterations,omitempty"`
+	// Describes the number of succeeded iteration runs.
+	SucceededIterations int `json:"succeeded_iterations,omitempty"`
+	// Describes the length of the list of items to iterate over.
+	TotalIterations int `json:"total_iterations,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ForEachTaskTaskRunStats) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ForEachTaskTaskRunStats) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type Format string
+
+const FormatMultiTask Format = `MULTI_TASK`
+
+const FormatSingleTask Format = `SINGLE_TASK`
+
+// String representation for [fmt.Print]
+func (f *Format) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Format) Set(v string) error {
+	switch v {
+	case `MULTI_TASK`, `SINGLE_TASK`:
+		*f = Format(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "MULTI_TASK", "SINGLE_TASK"`, v)
+	}
+}
+
+// Type always returns Format to satisfy [pflag.Value] interface
+func (f *Format) Type() string {
+	return "Format"
+}
+
+type GcpAttributes struct {
+	// This field determines whether the instance pool will contain preemptible
+	// VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs
+	// if the former is unavailable.
+	Availability GcpAvailability `json:"availability,omitempty"`
+	// Boot disk size in GB.
+	BootDiskSize int `json:"boot_disk_size,omitempty"`
+	// If provided, the cluster will impersonate the Google service account when
+	// accessing gcloud services (like GCS). The Google service account must
+	// have previously been added to the Databricks environment by an account
+	// administrator.
+	GoogleServiceAccount string `json:"google_service_account,omitempty"`
+	// If provided, each node (workers and driver) in the cluster will have this
+	// number of local SSDs attached. Each local SSD is 375GB in size. Refer to
+	// [GCP documentation] for the supported number of local SSDs for each
+	// instance type.
+	//
+	// [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds
+	LocalSsdCount int `json:"local_ssd_count,omitempty"`
+	// This field determines whether the Spark executors will be scheduled to
+	// run on preemptible VMs (when set to true) versus standard compute engine
+	// VMs (when set to false; default). Note: soon to be deprecated, use the
+	// availability field instead.
+	UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"`
+	// Identifier for the availability zone in which the cluster resides. This
+	// can be one of the following: - "HA" => High availability, spread nodes
+	// across availability zones for a Databricks deployment region [default] -
+	// "AUTO" => Databricks picks an availability zone to schedule the cluster
+	// on. - A GCP availability zone => pick one of the available zones for
+	// (machine type + region) from
+	// https://cloud.google.com/compute/docs/regions-zones.
+ ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// This field determines whether the instance pool will contain preemptible VMs, +// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the +// former is unavailable. +type GcpAvailability string + +const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP` + +const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP` + +const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP` + +// String representation for [fmt.Print] +func (f *GcpAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GcpAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`: + *f = GcpAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v) + } +} + +// Type always returns GcpAvailability to satisfy [pflag.Value] interface +func (f *GcpAvailability) Type() string { + return "GcpAvailability" +} + +type GcsStorageInfo struct { + // GCS destination/URI, e.g. `gs://my-bucket/some-prefix` + Destination string `json:"destination"` +} + +// Get job permission levels +type GetJobPermissionLevelsRequest struct { + // The job for which to get or manage permissions. + JobId string `json:"-" url:"-"` +} + +type GetJobPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []JobPermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get job permissions +type GetJobPermissionsRequest struct { + // The job for which to get or manage permissions. + JobId string `json:"-" url:"-"` +} + +// Get a single job +type GetJobRequest struct { + // The canonical identifier of the job to retrieve information about. This + // field is required. + JobId int64 `json:"-" url:"job_id"` + // Use `next_page_token` returned from the previous GetJob to request the + // next page of the job's sub-resources. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetJobRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetJobRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get job policy compliance +type GetPolicyComplianceRequest struct { + // The ID of the job whose compliance status you are requesting. + JobId int64 `json:"-" url:"job_id"` +} + +type GetPolicyComplianceResponse struct { + // Whether the job is compliant with its policies or not. Jobs could be out + // of compliance if a policy they are using was updated after the job was + // last edited and some of its job clusters no longer comply with their + // updated policies. + IsCompliant bool `json:"is_compliant,omitempty"` + // An object containing key-value mappings representing the first 200 policy + // validation errors. The keys indicate the path where the policy validation + // error is occurring. An identifier for the job cluster is prepended to the + // path. The values indicate an error message describing the policy + // validation error. 
+ Violations map[string]string `json:"violations,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPolicyComplianceResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPolicyComplianceResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the output for a single run +type GetRunOutputRequest struct { + // The canonical identifier for the run. + RunId int64 `json:"-" url:"run_id"` +} + +// Get a single job run +type GetRunRequest struct { + // Whether to include the repair history in the response. + IncludeHistory bool `json:"-" url:"include_history,omitempty"` + // Whether to include resolved parameter values in the response. + IncludeResolvedValues bool `json:"-" url:"include_resolved_values,omitempty"` + // Use `next_page_token` returned from the previous GetRun to request the + // next page of the run's sub-resources. + PageToken string `json:"-" url:"page_token,omitempty"` + // The canonical identifier of the run for which to retrieve the metadata. + // This field is required. + RunId int64 `json:"-" url:"run_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetRunRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetRunRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GitProvider string + +const GitProviderAwsCodeCommit GitProvider = `awsCodeCommit` + +const GitProviderAzureDevOpsServices GitProvider = `azureDevOpsServices` + +const GitProviderBitbucketCloud GitProvider = `bitbucketCloud` + +const GitProviderBitbucketServer GitProvider = `bitbucketServer` + +const GitProviderGitHub GitProvider = `gitHub` + +const GitProviderGitHubEnterprise GitProvider = `gitHubEnterprise` + +const GitProviderGitLab GitProvider = `gitLab` + +const GitProviderGitLabEnterpriseEdition GitProvider = `gitLabEnterpriseEdition` + +// String representation for [fmt.Print] +func (f *GitProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GitProvider) Set(v string) error { + switch v { + case `awsCodeCommit`, `azureDevOpsServices`, `bitbucketCloud`, `bitbucketServer`, `gitHub`, `gitHubEnterprise`, `gitLab`, `gitLabEnterpriseEdition`: + *f = GitProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "awsCodeCommit", "azureDevOpsServices", "bitbucketCloud", "bitbucketServer", "gitHub", "gitHubEnterprise", "gitLab", "gitLabEnterpriseEdition"`, v) + } +} + +// Type always returns GitProvider to satisfy [pflag.Value] interface +func (f *GitProvider) Type() string { + return "GitProvider" +} + +// Read-only state of the remote repository at the time the job was run. This +// field is only included on job runs. +type GitSnapshot struct { + // Commit that was used to execute the run. If git_branch was specified, + // this points to the HEAD of the branch at the time of the run; if git_tag + // was specified, this points to the commit the tag points to. + UsedCommit string `json:"used_commit,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GitSnapshot) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GitSnapshot) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// An optional specification for a remote Git repository containing the source +// code used by tasks. Version-controlled source code is supported by notebook, +// dbt, Python script, and SQL File tasks. 
+//
+// If `git_source` is set, these tasks retrieve the file from the remote
+// repository by default. However, this behavior can be overridden by setting
+// `source` to `WORKSPACE` on the task.
+//
+// Note: dbt and SQL File tasks support only version-controlled sources. If dbt
+// or SQL File tasks are used, `git_source` must be defined on the job.
+type GitSource struct {
+	// Name of the branch to be checked out and used by this job. This field
+	// cannot be specified in conjunction with git_tag or git_commit.
+	GitBranch string `json:"git_branch,omitempty"`
+	// Commit to be checked out and used by this job. This field cannot be
+	// specified in conjunction with git_branch or git_tag.
+	GitCommit string `json:"git_commit,omitempty"`
+	// Unique identifier of the service used to host the Git repository. The
+	// value is case insensitive.
+	GitProvider GitProvider `json:"git_provider"`
+	// Read-only state of the remote repository at the time the job was run.
+	// This field is only included on job runs.
+	GitSnapshot *GitSnapshot `json:"git_snapshot,omitempty"`
+	// Name of the tag to be checked out and used by this job. This field cannot
+	// be specified in conjunction with git_branch or git_commit.
+	GitTag string `json:"git_tag,omitempty"`
+	// URL of the repository to be cloned by this job.
+	GitUrl string `json:"git_url"`
+	// The source of the job specification in the remote repository when the job
+	// is source controlled.
+	JobSource *JobSource `json:"job_source,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GitSource) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GitSource) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type InitScriptInfo struct {
+	// destination needs to be provided. e.g. `{ "abfss" : { "destination" :
+	// "abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>"
+	// } }`
+	Abfss *Adlsgen2Info `json:"abfss,omitempty"`
+	// destination needs to be provided. e.g. `{ "dbfs" : { "destination" :
+	// "dbfs:/home/cluster_log" } }`
+	Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"`
+	// destination needs to be provided. e.g. `{ "file" : { "destination" :
+	// "file:/my/local/file.sh" } }`
+	File *LocalFileInfo `json:"file,omitempty"`
+	// destination needs to be provided. e.g. `{ "gcs": { "destination":
+	// "gs://my-bucket/file.sh" } }`
+	Gcs *GcsStorageInfo `json:"gcs,omitempty"`
+	// destination and either the region or endpoint need to be provided. e.g.
+	// `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" :
+	// "us-west-2" } }` The cluster IAM role is used to access S3; please make
+	// sure the cluster IAM role in `instance_profile_arn` has permission to
+	// write data to the S3 destination.
+	S3 *S3StorageInfo `json:"s3,omitempty"`
+	// destination needs to be provided. e.g. `{ "volumes" : { "destination" :
+	// "/Volumes/my-init.sh" } }`
+	Volumes *VolumesStorageInfo `json:"volumes,omitempty"`
+	// destination needs to be provided. e.g. `{ "workspace" : { "destination" :
+	// "/Users/user1@databricks.com/my-init.sh" } }`
+	Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"`
+}
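+// As a usage sketch (illustrative only, with a hypothetical repository URL and
+// branch), a GitSource that pins a job to a branch of a remote repository
+// might look like:
+//
+//	gitSource := GitSource{
+//		GitUrl:      "https://github.com/example/repo.git",
+//		GitProvider: GitProviderGitHub,
+//		GitBranch:   "main",
+//	}
+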
+// Job was retrieved successfully.
+type Job struct {
+	// The time at which this job was created in epoch milliseconds
+	// (milliseconds since 1/1/1970 UTC).
+	CreatedTime int64 `json:"created_time,omitempty"`
+	// The creator user name. This field won’t be included in the response if
+	// the user has already been deleted.
+	CreatorUserName string `json:"creator_user_name,omitempty"`
+	// The ID of the budget policy used by this job for cost attribution
+	// purposes. This may be set through (in order of precedence): 1. Budget
+	// admins through the account or workspace console 2. Jobs UI in the job
+	// details page and Jobs API using `budget_policy_id` 3. Inferred default
+	// based on accessible budget policies of the run_as identity on job
+	// creation or modification.
+	EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"`
+	// Indicates if the job has more sub-resources (`tasks`, `job_clusters`)
+	// that are not shown. They can be accessed via the :method:jobs/get
+	// endpoint. It is only relevant for API 2.2 :method:jobs/list requests with
+	// `expand_tasks=true`.
+	HasMore bool `json:"has_more,omitempty"`
+	// The canonical identifier for this job.
+	JobId int64 `json:"job_id,omitempty"`
+	// A token that can be used to list the next page of sub-resources.
+	NextPageToken string `json:"next_page_token,omitempty"`
+	// The email of an active workspace user or the application ID of a service
+	// principal that the job runs as. This value can be changed by setting the
+	// `run_as` field when creating or updating a job.
+	//
+	// By default, `run_as_user_name` is based on the current job settings and
+	// is set to the creator of the job if job access control is disabled or to
+	// the user with the `is_owner` permission if job access control is enabled.
+	RunAsUserName string `json:"run_as_user_name,omitempty"`
+	// Settings for this job and all of its runs. These settings can be updated
+	// using the `resetJob` method.
+	Settings *JobSettings `json:"settings,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Job) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Job) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type JobAccessControlRequest struct {
+	// name of the group
+	GroupName string `json:"group_name,omitempty"`
+	// Permission level
+	PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"`
+	// application ID of a service principal
+	ServicePrincipalName string `json:"service_principal_name,omitempty"`
+	// name of the user
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *JobAccessControlRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s JobAccessControlRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type JobAccessControlResponse struct {
+	// All permissions.
+	AllPermissions []JobPermission `json:"all_permissions,omitempty"`
+	// Display name of the user or service principal.
+	DisplayName string `json:"display_name,omitempty"`
+	// name of the group
+	GroupName string `json:"group_name,omitempty"`
+	// Name of the service principal.
+	ServicePrincipalName string `json:"service_principal_name,omitempty"`
+	// name of the user
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *JobAccessControlResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s JobAccessControlResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type JobCluster struct {
+	// A unique name for the job cluster. This field is required and must be
+	// unique within the job. `JobTaskSettings` may refer to this field to
+	// determine which cluster to launch for the task execution.
+ JobClusterKey string `json:"job_cluster_key"` + // If new_cluster, a description of a cluster that is created for each task. + NewCluster JobsClusterSpec `json:"new_cluster"` +} + +type JobCompliance struct { + // Whether this job is in compliance with the latest version of its policy. + IsCompliant bool `json:"is_compliant,omitempty"` + // Canonical unique identifier for a job. + JobId int64 `json:"job_id"` + // An object containing key-value mappings representing the first 200 policy + // validation errors. The keys indicate the path where the policy validation + // error is occurring. An identifier for the job cluster is prepended to the + // path. The values indicate an error message describing the policy + // validation error. + Violations map[string]string `json:"violations,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobCompliance) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobCompliance) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type JobDeployment struct { + // The kind of deployment that manages the job. + // + // * `BUNDLE`: The job is managed by Databricks Asset Bundle. + Kind JobDeploymentKind `json:"kind"` + // Path of the file that contains deployment metadata. + MetadataFilePath string `json:"metadata_file_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobDeployment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobDeployment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// * `BUNDLE`: The job is managed by Databricks Asset Bundle. +type JobDeploymentKind string + +// The job is managed by Databricks Asset Bundle. +const JobDeploymentKindBundle JobDeploymentKind = `BUNDLE` + +// String representation for [fmt.Print] +func (f *JobDeploymentKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobDeploymentKind) Set(v string) error { + switch v { + case `BUNDLE`: + *f = JobDeploymentKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUNDLE"`, v) + } +} + +// Type always returns JobDeploymentKind to satisfy [pflag.Value] interface +func (f *JobDeploymentKind) Type() string { + return "JobDeploymentKind" +} + +// Edit mode of the job. +// +// * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * +// `EDITABLE`: The job is in an editable state and can be modified. +type JobEditMode string + +// The job is in an editable state and can be modified. +const JobEditModeEditable JobEditMode = `EDITABLE` + +// The job is in a locked UI state and cannot be modified. +const JobEditModeUiLocked JobEditMode = `UI_LOCKED` + +// String representation for [fmt.Print] +func (f *JobEditMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobEditMode) Set(v string) error { + switch v { + case `EDITABLE`, `UI_LOCKED`: + *f = JobEditMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EDITABLE", "UI_LOCKED"`, v) + } +} + +// Type always returns JobEditMode to satisfy [pflag.Value] interface +func (f *JobEditMode) Type() string { + return "JobEditMode" +} + +type JobEmailNotifications struct { + // If true, do not send email to recipients specified in `on_failure` if the + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. 
+	NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
+	// A list of email addresses to be notified when the duration of a run
+	// exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in
+	// the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is
+	// specified in the `health` field for the job, notifications are not sent.
+	OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"`
+	// A list of email addresses to be notified when a run unsuccessfully
+	// completes. A run is considered to have completed unsuccessfully if it
+	// ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED` or
+	// `TIMED_OUT` result_state. If this is not specified on job creation,
+	// reset, or update, the list is empty, and notifications are not sent.
+	OnFailure []string `json:"on_failure,omitempty"`
+	// A list of email addresses to be notified when a run begins. If not
+	// specified on job creation, reset, or update, the list is empty, and
+	// notifications are not sent.
+	OnStart []string `json:"on_start,omitempty"`
+	// A list of email addresses to notify when any streaming backlog thresholds
+	// are exceeded for any stream. Streaming backlog thresholds can be set in
+	// the `health` field using the following metrics:
+	// `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`,
+	// `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is
+	// based on the 10-minute average of these metrics. If the issue persists,
+	// notifications are resent every 30 minutes.
+	OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"`
+	// A list of email addresses to be notified when a run successfully
+	// completes. A run is considered to have completed successfully if it ends
+	// with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If
+	// not specified on job creation, reset, or update, the list is empty, and
+	// notifications are not sent.
+	OnSuccess []string `json:"on_success,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *JobEmailNotifications) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s JobEmailNotifications) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type JobEnvironment struct {
+	// The key of an environment. It has to be unique within a job.
+	EnvironmentKey string `json:"environment_key"`
+	// The environment entity used to preserve the serverless environment side
+	// panel and jobs' environment for non-notebook tasks. In this minimal
+	// environment spec, only pip dependencies are supported.
+	Spec *Environment `json:"spec,omitempty"`
+}
+
+type JobNotificationSettings struct {
+	// If true, do not send notifications to recipients specified in
+	// `on_failure` if the run is canceled.
+	NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"`
+	// If true, do not send notifications to recipients specified in
+	// `on_failure` if the run is skipped.
+ NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobNotificationSettings) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobNotificationSettings) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type JobParameter struct { + // The optional default value of the parameter + Default string `json:"default,omitempty"` + // The name of the parameter + Name string `json:"name,omitempty"` + // The value used in the run + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobParameter) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobParameter) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type JobParameterDefinition struct { + // Default value of the parameter. + Default string `json:"default"` + // The name of the defined parameter. May only contain alphanumeric + // characters, `_`, `-`, and `.` + Name string `json:"name"` +} + +type JobPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type JobPermissionLevel string + +const JobPermissionLevelCanManage JobPermissionLevel = `CAN_MANAGE` + +const JobPermissionLevelCanManageRun JobPermissionLevel = `CAN_MANAGE_RUN` + +const JobPermissionLevelCanView JobPermissionLevel = `CAN_VIEW` + +const JobPermissionLevelIsOwner JobPermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *JobPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_MANAGE_RUN`, `CAN_VIEW`, `IS_OWNER`: + *f = JobPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"`, v) + } +} + +// Type always returns JobPermissionLevel to satisfy [pflag.Value] interface +func (f *JobPermissionLevel) Type() string { + return "JobPermissionLevel" +} + +type JobPermissions struct { + AccessControlList []JobAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type JobPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type JobPermissionsRequest struct { + AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"` + // The job for which to get or manage 
permissions.
+	JobId string `json:"-" url:"-"`
+}
+
+// Write-only setting. Specifies the user or service principal that the job runs
+// as. If not specified, the job runs as the user who created the job.
+//
+// Either `user_name` or `service_principal_name` should be specified. If not,
+// an error is thrown.
+type JobRunAs struct {
+	// Application ID of an active service principal. Setting this field
+	// requires the `servicePrincipal/user` role.
+	ServicePrincipalName string `json:"service_principal_name,omitempty"`
+	// The email of an active workspace user. Non-admin users can only set this
+	// field to their own email.
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *JobRunAs) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s JobRunAs) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type JobSettings struct {
+	// The ID of the user-specified budget policy to use for this job. If not
+	// specified, a default budget policy may be applied when creating or
+	// modifying the job. See `effective_budget_policy_id` for the budget policy
+	// used by this workload.
+	BudgetPolicyId string `json:"budget_policy_id,omitempty"`
+	// An optional continuous property for this job. The continuous property
+	// will ensure that there is always one run executing. Only one of
+	// `schedule` and `continuous` can be used.
+	Continuous *Continuous `json:"continuous,omitempty"`
+	// Deployment information for jobs managed by external sources.
+	Deployment *JobDeployment `json:"deployment,omitempty"`
+	// An optional description for the job. The maximum length is 27700
+	// characters in UTF-8 encoding.
+	Description string `json:"description,omitempty"`
+	// Edit mode of the job.
+	//
+	// * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. *
+	// `EDITABLE`: The job is in an editable state and can be modified.
+	EditMode JobEditMode `json:"edit_mode,omitempty"`
+	// An optional set of email addresses that is notified when runs of this job
+	// begin or complete as well as when this job is deleted.
+	EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
+	// A list of task execution environment specifications that can be
+	// referenced by serverless tasks of this job. An environment is required to
+	// be present for serverless tasks. For serverless notebook tasks, the
+	// environment is accessible in the notebook environment panel. For other
+	// serverless tasks, the task environment is required to be specified using
+	// environment_key in the task settings.
+	Environments []JobEnvironment `json:"environments,omitempty"`
+	// Used to indicate the format of the job. This field is ignored in
+	// Create/Update/Reset calls. When using the Jobs API 2.1, this value is
+	// always set to `"MULTI_TASK"`.
+	Format Format `json:"format,omitempty"`
+	// An optional specification for a remote Git repository containing the
+	// source code used by tasks. Version-controlled source code is supported by
+	// notebook, dbt, Python script, and SQL File tasks.
+	//
+	// If `git_source` is set, these tasks retrieve the file from the remote
+	// repository by default. However, this behavior can be overridden by
+	// setting `source` to `WORKSPACE` on the task.
+	//
+	// Note: dbt and SQL File tasks support only version-controlled sources. If
+	// dbt or SQL File tasks are used, `git_source` must be defined on the job.
+	GitSource *GitSource `json:"git_source,omitempty"`
+	// An optional set of health rules that can be defined for this job.
+	Health *JobsHealthRules `json:"health,omitempty"`
+	// A list of job cluster specifications that can be shared and reused by
+	// tasks of this job. Libraries cannot be declared in a shared job cluster.
+	// You must declare dependent libraries in task settings. If more than 100
+	// job clusters are available, you can paginate through them using
+	// :method:jobs/get.
+	JobClusters []JobCluster `json:"job_clusters,omitempty"`
+	// An optional maximum allowed number of concurrent runs of the job. Set
+	// this value if you want to be able to execute multiple runs of the same
+	// job concurrently. This is useful, for example, if you trigger your job on
+	// a frequent schedule and want to allow consecutive runs to overlap with
+	// each other, or if you want to trigger multiple runs that differ by their
+	// input parameters. This setting affects only new runs. For example,
+	// suppose the job’s concurrency is 4 and there are 4 concurrent active
+	// runs. Then setting the concurrency to 3 won’t kill any of the active
+	// runs. However, from then on, new runs are skipped unless there are fewer
+	// than 3 active runs. This value cannot exceed 1000. Setting this value to
+	// `0` causes all new runs to be skipped.
+	MaxConcurrentRuns int `json:"max_concurrent_runs,omitempty"`
+	// An optional name for the job. The maximum length is 4096 bytes in UTF-8
+	// encoding.
+	Name string `json:"name,omitempty"`
+	// Optional notification settings that are used when sending notifications
+	// to each of the `email_notifications` and `webhook_notifications` for this
+	// job.
+	NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"`
+	// Job-level parameter definitions
+	Parameters []JobParameterDefinition `json:"parameters,omitempty"`
+	// PerformanceTarget defines how performant or cost-efficient the execution
+	// of a run on serverless compute should be.
+	PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"`
+	// The queue settings of the job.
+	Queue *QueueSettings `json:"queue,omitempty"`
+	// Write-only setting. Specifies the user or service principal that the job
+	// runs as. If not specified, the job runs as the user who created the job.
+	//
+	// Either `user_name` or `service_principal_name` should be specified. If
+	// not, an error is thrown.
+	RunAs *JobRunAs `json:"run_as,omitempty"`
+	// An optional periodic schedule for this job. The default behavior is that
+	// the job only runs when triggered by clicking “Run Now” in the Jobs UI
+	// or sending an API request to `runNow`.
+	Schedule *CronSchedule `json:"schedule,omitempty"`
+	// A map of tags associated with the job. These are forwarded to the cluster
+	// as cluster tags for jobs clusters, and are subject to the same
+	// limitations as cluster tags. A maximum of 25 tags can be added to the
+	// job.
+	Tags map[string]string `json:"tags,omitempty"`
+	// A list of task specifications to be executed by this job. If more than
+	// 100 tasks are available, you can paginate through them using
+	// :method:jobs/get. Use the `next_page_token` field at the object root to
+	// determine if more results are available.
+	Tasks []Task `json:"tasks,omitempty"`
+	// An optional timeout applied to each run of this job. A value of `0` means
+	// no timeout.
+	TimeoutSeconds int `json:"timeout_seconds,omitempty"`
+	// A configuration to trigger a run when certain conditions are met. The
+	// default behavior is that the job runs only when triggered by clicking
+	// “Run Now” in the Jobs UI or sending an API request to `runNow`.
+	Trigger *TriggerSettings `json:"trigger,omitempty"`
+	// A collection of system notification IDs to notify when runs of this job
+	// begin or complete.
+	WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *JobSettings) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s JobSettings) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// The source of the job specification in the remote repository when the job is
+// source controlled.
+type JobSource struct {
+	// Dirty state indicates the job is not fully synced with the job
+	// specification in the remote repository.
+	//
+	// Possible values are: * `NOT_SYNCED`: The job is not yet synced with the
+	// remote job specification. Import the remote job specification from the UI
+	// to make the job fully synced. * `DISCONNECTED`: The job is temporarily
+	// disconnected from the remote job specification and is allowed for live
+	// edit. Import the remote job specification again from the UI to make the
+	// job fully synced.
+	DirtyState JobSourceDirtyState `json:"dirty_state,omitempty"`
+	// Name of the branch which the job is imported from.
+	ImportFromGitBranch string `json:"import_from_git_branch"`
+	// Path of the job YAML file that contains the job specification.
+	JobConfigPath string `json:"job_config_path"`
+}
+
+// Dirty state indicates the job is not fully synced with the job specification
+// in the remote repository.
+//
+// Possible values are: * `NOT_SYNCED`: The job is not yet synced with the
+// remote job specification. Import the remote job specification from the UI to
+// make the job fully synced. * `DISCONNECTED`: The job is temporarily
+// disconnected from the remote job specification and is allowed for live edit.
+// Import the remote job specification again from the UI to make the job fully
+// synced.
+type JobSourceDirtyState string
+
+// The job is temporarily disconnected from the remote job specification and is
+// allowed for live edit. Import the remote job specification again from the UI
+// to make the job fully synced.
+const JobSourceDirtyStateDisconnected JobSourceDirtyState = `DISCONNECTED`
+
+// The job is not yet synced with the remote job specification. Import the
+// remote job specification from the UI to make the job fully synced.
+const JobSourceDirtyStateNotSynced JobSourceDirtyState = `NOT_SYNCED`
+
+// String representation for [fmt.Print]
+func (f *JobSourceDirtyState) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *JobSourceDirtyState) Set(v string) error {
+	switch v {
+	case `DISCONNECTED`, `NOT_SYNCED`:
+		*f = JobSourceDirtyState(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "NOT_SYNCED"`, v)
+	}
+}
+
+// Type always returns JobSourceDirtyState to satisfy [pflag.Value] interface
+func (f *JobSourceDirtyState) Type() string {
+	return "JobSourceDirtyState"
+}
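+// As a usage sketch (illustrative only; the job name, task key, and notebook
+// path are hypothetical, and Task/NotebookTask are the types defined in this
+// package), minimal JobSettings with a single notebook task and a concurrency
+// cap might look like:
+//
+//	settings := JobSettings{
+//		Name:              "nightly-report",
+//		MaxConcurrentRuns: 1,
+//		Tasks: []Task{{
+//			TaskKey:      "main",
+//			NotebookTask: &NotebookTask{NotebookPath: "/Workspace/Users/someone@example.com/report"},
+//		}},
+//	}
+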
+type JobsClusterSpec struct {
+	// When set to true, fixed and default values from the policy will be used
+	// for fields that are omitted. When set to false, only fixed values from
+	// the policy will be applied.
+	ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"`
+	// Parameters needed in order to automatically scale clusters up and down
+	// based on load. Note: autoscaling works best with DB runtime versions 3.0
+	// or later.
+	Autoscale *AutoScale `json:"autoscale,omitempty"`
+	// Automatically terminates the cluster after it is inactive for this time
+	// in minutes. If not set, this cluster will not be automatically
+	// terminated. If specified, the threshold must be between 10 and 10000
+	// minutes. Users can also set this value to 0 to explicitly disable
+	// automatic termination.
+	AutoterminationMinutes int `json:"autotermination_minutes,omitempty"`
+	// Attributes related to clusters running on Amazon Web Services. If not
+	// specified at cluster creation, a set of default values will be used.
+	AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"`
+	// Attributes related to clusters running on Microsoft Azure. If not
+	// specified at cluster creation, a set of default values will be used.
+	AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"`
+	// The configuration for delivering Spark logs to a long-term storage
+	// destination. Three kinds of destinations (DBFS, S3, and Unity Catalog
+	// volumes) are supported. Only one destination can be specified for one
+	// cluster. If the conf is given, the logs will be delivered to the
+	// destination every `5 mins`. The destination of driver logs is
+	// `$destination/$clusterId/driver`, while the destination of executor logs
+	// is `$destination/$clusterId/executor`.
+	ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"`
+	// Cluster name requested by the user. This doesn't have to be unique. If
+	// not specified at creation, the cluster name will be an empty string.
+	ClusterName string `json:"cluster_name,omitempty"`
+	// Additional tags for cluster resources. Databricks will tag all cluster
+	// resources (e.g., AWS instances and EBS volumes) with these tags in
+	// addition to `default_tags`. Notes:
+	//
+	// - Currently, Databricks allows at most 45 custom tags
+	//
+	// - Clusters can only reuse cloud resources if the resources' tags are a
+	// subset of the cluster tags
+	CustomTags map[string]string `json:"custom_tags,omitempty"`
+	// Data security mode decides what data governance model to use when
+	// accessing data from a cluster.
+	//
+	// The following modes can only be used with `kind`. *
+	// `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate
+	// access mode depending on your compute configuration. *
+	// `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. *
+	// `DATA_SECURITY_MODE_DEDICATED`: Alias for `SINGLE_USER`.
+	//
+	// The following modes can be used regardless of `kind`. * `NONE`: No
+	// security isolation for multiple users sharing the cluster. Data
+	// governance features are not available in this mode. * `SINGLE_USER`: A
+	// secure cluster that can only be exclusively used by a single user
+	// specified in `single_user_name`. Most programming languages, cluster
+	// features and data governance features are available in this mode. *
+	// `USER_ISOLATION`: A secure cluster that can be shared by multiple users.
+	// Cluster users are fully isolated so that they cannot see each other's
+	// data and credentials. Most data governance features are supported in this
+	// mode. But programming languages and cluster features might be limited.
+	//
+	// The following modes are deprecated starting with Databricks Runtime 15.0
+	// and will be removed for future Databricks Runtime versions:
+	//
+	// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table
+	// ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating
+	// from legacy Passthrough on high concurrency clusters. *
+	// `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+	// Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
+	// mode provides a way that doesn’t have UC nor passthrough enabled.
+	DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"`
+
+	DockerImage *DockerImage `json:"docker_image,omitempty"`
+	// The optional ID of the instance pool to which the driver of the cluster
+	// belongs. The cluster uses the instance pool with id (instance_pool_id) if
+	// the driver pool is not assigned.
+	DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"`
+	// The node type of the Spark driver. Note that this field is optional; if
+	// unset, the driver node type will be set as the same value as
+	// `node_type_id` defined above.
+	DriverNodeTypeId string `json:"driver_node_type_id,omitempty"`
+	// Autoscaling Local Storage: when enabled, this cluster will dynamically
+	// acquire additional disk space when its Spark workers are running low on
+	// disk space. This feature requires specific AWS permissions to function
+	// correctly - refer to the User Guide for more details.
+	EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"`
+	// Whether to enable LUKS on cluster VMs' local disks
+	EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"`
+	// Attributes related to clusters running on Google Cloud Platform. If not
+	// specified at cluster creation, a set of default values will be used.
+	GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"`
+	// The configuration for storing init scripts. Any number of destinations
+	// can be specified. The scripts are executed sequentially in the order
+	// provided. If `cluster_log_conf` is specified, init script logs are sent
+	// to `<destination>/<cluster-ID>/init_scripts`.
+	InitScripts []InitScriptInfo `json:"init_scripts,omitempty"`
+	// The optional ID of the instance pool to which the cluster belongs.
+	InstancePoolId string `json:"instance_pool_id,omitempty"`
+	// This field can only be used with `kind`.
+	//
+	// When set to true, Databricks will automatically set single node related
+	// `custom_tags`, `spark_conf`, and `num_workers`
+	IsSingleNode bool `json:"is_single_node,omitempty"`
+	// The kind of compute described by this compute specification.
+	//
+	// Depending on `kind`, different validations and default values will be
+	// applied.
+	//
+	// The first usage of this value is for the simple cluster form where it
+	// sets `kind = CLASSIC_PREVIEW`.
+	Kind Kind `json:"kind,omitempty"`
+	// This field encodes, through a single value, the resources available to
+	// each of the Spark nodes in this cluster. For example, the Spark nodes can
+	// be provisioned and optimized for memory or compute intensive workloads. A
+	// list of available node types can be retrieved by using the
+	// :method:clusters/listNodeTypes API call.
+	NodeTypeId string `json:"node_type_id,omitempty"`
+	// Number of worker nodes that this cluster should have. A cluster has one
+	// Spark Driver and `num_workers` Executors for a total of `num_workers` + 1
+	// Spark nodes.
+ // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. + RuntimeEngine RuntimeEngine `json:"runtime_engine,omitempty"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName string `json:"single_user_name,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion string `json:"spark_version,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + // This field can only be used with `kind`. + // + // `effective_spark_version` is determined by `spark_version` (DBR release), + // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or + // not. + UseMlRuntime bool `json:"use_ml_runtime,omitempty"` + + WorkloadType *WorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *JobsClusterSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobsClusterSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Specifies the health metric that is being evaluated for a particular health +// rule. 
+// +// * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * +// `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting +// to be consumed across all streams. This metric is in Public Preview. * +// `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all +// streams. This metric is in Public Preview. * `STREAMING_BACKLOG_SECONDS`: An +// estimate of the maximum consumer delay across all streams. This metric is in +// Public Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum +// number of outstanding files across all streams. This metric is in Public +// Preview. +type JobsHealthMetric string + +// Expected total time for a run in seconds. +const JobsHealthMetricRunDurationSeconds JobsHealthMetric = `RUN_DURATION_SECONDS` + +// An estimate of the maximum bytes of data waiting to be consumed across all +// streams. This metric is in Public Preview. +const JobsHealthMetricStreamingBacklogBytes JobsHealthMetric = `STREAMING_BACKLOG_BYTES` + +// An estimate of the maximum number of outstanding files across all streams. +// This metric is in Public Preview. +const JobsHealthMetricStreamingBacklogFiles JobsHealthMetric = `STREAMING_BACKLOG_FILES` + +// An estimate of the maximum offset lag across all streams. This metric is in +// Public Preview. +const JobsHealthMetricStreamingBacklogRecords JobsHealthMetric = `STREAMING_BACKLOG_RECORDS` + +// An estimate of the maximum consumer delay across all streams. This metric is +// in Public Preview. +const JobsHealthMetricStreamingBacklogSeconds JobsHealthMetric = `STREAMING_BACKLOG_SECONDS` + +// String representation for [fmt.Print] +func (f *JobsHealthMetric) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobsHealthMetric) Set(v string) error { + switch v { + case `RUN_DURATION_SECONDS`, `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_FILES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`: + *f = JobsHealthMetric(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", "STREAMING_BACKLOG_FILES", "STREAMING_BACKLOG_RECORDS", "STREAMING_BACKLOG_SECONDS"`, v) + } +} + +// Type always returns JobsHealthMetric to satisfy [pflag.Value] interface +func (f *JobsHealthMetric) Type() string { + return "JobsHealthMetric" +} + +// Specifies the operator used to compare the health metric value with the +// specified threshold. +type JobsHealthOperator string + +const JobsHealthOperatorGreaterThan JobsHealthOperator = `GREATER_THAN` + +// String representation for [fmt.Print] +func (f *JobsHealthOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobsHealthOperator) Set(v string) error { + switch v { + case `GREATER_THAN`: + *f = JobsHealthOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GREATER_THAN"`, v) + } +} + +// Type always returns JobsHealthOperator to satisfy [pflag.Value] interface +func (f *JobsHealthOperator) Type() string { + return "JobsHealthOperator" +} + +type JobsHealthRule struct { + // Specifies the health metric that is being evaluated for a particular + // health rule. + // + // * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * + // `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data + // waiting to be consumed across all streams. This metric is in Public + // Preview. 
* `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset + // lag across all streams. This metric is in Public Preview. * + // `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay + // across all streams. This metric is in Public Preview. * + // `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of + // outstanding files across all streams. This metric is in Public Preview. + Metric JobsHealthMetric `json:"metric"` + // Specifies the operator used to compare the health metric value with the + // specified threshold. + Op JobsHealthOperator `json:"op"` + // Specifies the threshold value that the health metric should obey to + // satisfy the health rule. + Value int64 `json:"value"` +} + +// An optional set of health rules that can be defined for this job. +type JobsHealthRules struct { + Rules []JobsHealthRule `json:"rules,omitempty"` +} + +// The kind of compute described by this compute specification. +// +// Depending on `kind`, different validations and default values will be +// applied. +// +// The first usage of this value is for the simple cluster form where it sets +// `kind = CLASSIC_PREVIEW`. +type Kind string + +const KindClassicPreview Kind = `CLASSIC_PREVIEW` + +// String representation for [fmt.Print] +func (f *Kind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Kind) Set(v string) error { + switch v { + case `CLASSIC_PREVIEW`: + *f = Kind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC_PREVIEW"`, v) + } +} + +// Type always returns Kind to satisfy [pflag.Value] interface +func (f *Kind) Type() string { + return "Kind" +} + +type Library struct { + // Specification of a CRAN library to be installed as part of the library + Cran *RCranLibrary `json:"cran,omitempty"` + // Deprecated. URI of the egg library to install. Installing Python egg + // files is deprecated and is not supported in Databricks Runtime 14.0 and + // above. + Egg string `json:"egg,omitempty"` + // URI of the JAR library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "jar": + // "/Workspace/path/to/library.jar" }`, `{ "jar" : + // "/Volumes/path/to/library.jar" }` or `{ "jar": + // "s3://my-bucket/library.jar" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the + // cluster with an IAM role to access the S3 URI. + Jar string `json:"jar,omitempty"` + // Specification of a maven library to be installed. For example: `{ + // "coordinates": "org.jsoup:jsoup:1.7.2" }` + Maven *MavenLibrary `json:"maven,omitempty"` + // Specification of a PyPi library to be installed. For example: `{ + // "package": "simplejson" }` + Pypi *PythonPyPiLibrary `json:"pypi,omitempty"` + // URI of the requirements.txt file to install. Only Workspace paths and + // Unity Catalog Volumes paths are supported. For example: `{ + // "requirements": "/Workspace/path/to/requirements.txt" }` or `{ + // "requirements" : "/Volumes/path/to/requirements.txt" }` + Requirements string `json:"requirements,omitempty"` + // URI of the wheel library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "whl": + // "/Workspace/path/to/library.whl" }`, `{ "whl" : + // "/Volumes/path/to/library.whl" }` or `{ "whl": + // "s3://my-bucket/library.whl" }`. If S3 is used, please make sure the + // cluster has read access on the library. 
You may need to launch the
+	// cluster with an IAM role to access the S3 URI.
+	Whl string `json:"whl,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Library) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Library) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListJobComplianceForPolicyResponse struct {
+	// A list of jobs and their policy compliance statuses.
+	Jobs []JobCompliance `json:"jobs,omitempty"`
+	// This field represents the pagination token to retrieve the next page of
+	// results. If this field is not in the response, there are no further
+	// results for the request.
+	NextPageToken string `json:"next_page_token,omitempty"`
+	// This field represents the pagination token to retrieve the previous page
+	// of results. If this field is not in the response, there are no further
+	// results for the request.
+	PrevPageToken string `json:"prev_page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListJobComplianceForPolicyResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListJobComplianceForPolicyResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// List job policy compliance
+type ListJobComplianceRequest struct {
+	// Use this field to specify the maximum number of results to be returned by
+	// the server. The server may further constrain the maximum number of
+	// results returned in a single page.
+	PageSize int `json:"-" url:"page_size,omitempty"`
+	// A page token that can be used to navigate to the next page or previous
+	// page as returned by `next_page_token` or `prev_page_token`.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+	// Canonical unique identifier for the cluster policy.
+	PolicyId string `json:"-" url:"policy_id"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListJobComplianceRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListJobComplianceRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// List jobs
+type ListJobsRequest struct {
+	// Whether to include task and cluster details in the response. Note that in
+	// API 2.2, only the first 100 elements will be shown. Use :method:jobs/get
+	// to paginate through all tasks and clusters.
+	ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"`
+	// The number of jobs to return. This value must be greater than 0 and less
+	// than or equal to 100. The default value is 20.
+	Limit int `json:"-" url:"limit,omitempty"`
+	// A filter on the list based on the exact (case insensitive) job name.
+	Name string `json:"-" url:"name,omitempty"`
+	// The offset of the first job to return, relative to the most recently
+	// created job. Deprecated since June 2023. Use `page_token` to iterate
+	// through the pages instead.
+	Offset int `json:"-" url:"offset,omitempty"`
+	// Use `next_page_token` or `prev_page_token` returned from the previous
+	// request to list the next or previous page of jobs respectively.
+	PageToken string `json:"-" url:"page_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListJobsRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListJobsRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
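+// As a usage sketch (illustrative only; `listJobs` stands in for whatever
+// hypothetical client call executes this request and returns a
+// *ListJobsResponse), page tokens are threaded from each response into the
+// next request:
+//
+//	req := ListJobsRequest{Limit: 25}
+//	for {
+//		resp, err := listJobs(ctx, req)
+//		if err != nil {
+//			return err
+//		}
+//		jobs = append(jobs, resp.Jobs...)
+//		if resp.NextPageToken == "" {
+//			break
+//		}
+//		req.PageToken = resp.NextPageToken
+//	}
+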
+ HasMore bool `json:"has_more,omitempty"` + // The list of jobs. Only included in the response if there are jobs to + // list. + Jobs []BaseJob `json:"jobs,omitempty"` + // A token that can be used to list the next page of jobs (if applicable). + NextPageToken string `json:"next_page_token,omitempty"` + // A token that can be used to list the previous page of jobs (if + // applicable). + PrevPageToken string `json:"prev_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListJobsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListJobsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List job runs +type ListRunsRequest struct { + // If active_only is `true`, only active runs are included in the results; + // otherwise, lists both active and completed runs. An active run is a run + // in the `QUEUED`, `PENDING`, `RUNNING`, or `TERMINATING` state. This field + // cannot be `true` when completed_only is `true`. + ActiveOnly bool `json:"-" url:"active_only,omitempty"` + // If completed_only is `true`, only completed runs are included in the + // results; otherwise, lists both active and completed runs. This field + // cannot be `true` when active_only is `true`. + CompletedOnly bool `json:"-" url:"completed_only,omitempty"` + // Whether to include task and cluster details in the response. Note that in + // API 2.2, only the first 100 elements will be shown. Use + // :method:jobs/getrun to paginate through all tasks and clusters. + ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"` + // The job for which to list runs. If omitted, the Jobs service lists runs + // from all jobs. + JobId int64 `json:"-" url:"job_id,omitempty"` + // The number of runs to return. This value must be greater than 0 and less + // than 25. The default value is 20. If a request specifies a limit of 0, + // the service instead uses the maximum limit. + Limit int `json:"-" url:"limit,omitempty"` + // The offset of the first run to return, relative to the most recent run. + // Deprecated since June 2023. Use `page_token` to iterate through the pages + // instead. + Offset int `json:"-" url:"offset,omitempty"` + // Use `next_page_token` or `prev_page_token` returned from the previous + // request to list the next or previous page of runs respectively. + PageToken string `json:"-" url:"page_token,omitempty"` + // The type of runs to return. For a description of run types, see + // :method:jobs/getRun. + RunType RunType `json:"-" url:"run_type,omitempty"` + // Show runs that started _at or after_ this value. The value must be a UTC + // timestamp in milliseconds. Can be combined with _start_time_to_ to filter + // by a time range. + StartTimeFrom int64 `json:"-" url:"start_time_from,omitempty"` + // Show runs that started _at or before_ this value. The value must be a UTC + // timestamp in milliseconds. Can be combined with _start_time_from_ to + // filter by a time range. + StartTimeTo int64 `json:"-" url:"start_time_to,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRunsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRunsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List of runs was retrieved successfully. +type ListRunsResponse struct { + // If true, additional runs matching the provided filter are available for + // listing.
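+ //
+ // Editor's sketch of the page-token protocol shared by these list types;
+ // `listJobs` is a hypothetical stand-in for the transport call that
+ // executes a ListJobsRequest:
+ //
+ //     req := ListJobsRequest{Limit: 100}
+ //     var all []BaseJob
+ //     for {
+ //         resp, err := listJobs(ctx, req)
+ //         if err != nil {
+ //             return err
+ //         }
+ //         all = append(all, resp.Jobs...)
+ //         if resp.NextPageToken == "" {
+ //             break
+ //         }
+ //         req.PageToken = resp.NextPageToken
+ //     }
+ //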
+ HasMore bool `json:"has_more,omitempty"` + // A token that can be used to list the next page of runs (if applicable). + NextPageToken string `json:"next_page_token,omitempty"` + // A token that can be used to list the previous page of runs (if + // applicable). + PrevPageToken string `json:"prev_page_token,omitempty"` + // A list of runs, from most recently started to least. Only included in the + // response if there are runs to list. + Runs []BaseRun `json:"runs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRunsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRunsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type LocalFileInfo struct { + // local file destination, e.g. `file:/my/local/file.sh` + Destination string `json:"destination"` +} + +type LogAnalyticsInfo struct { + // + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + // + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MavenLibrary struct { + // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". + Coordinates string `json:"coordinates"` + // List of dependencies to exclude. For example: `["slf4j:slf4j", + // "*:hadoop-client"]`. + // + // Maven dependency exclusions: + // https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + Exclusions []string `json:"exclusions,omitempty"` + // Maven repo to install the Maven package from. If omitted, both Maven + // Central Repository and Spark Packages are searched. + Repo string `json:"repo,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MavenLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MavenLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type NotebookOutput struct { + // The value passed to + // [dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit). + // Databricks restricts this API to return the first 5 MB of the value. For + // a larger result, your job can store the results in a cloud storage + // service. This field is absent if `dbutils.notebook.exit()` was never + // called. + Result string `json:"result,omitempty"` + // Whether or not the result was truncated. + Truncated bool `json:"truncated,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NotebookOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NotebookOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type NotebookTask struct { + // Base parameters to be used for each run of this job. If the run is + // initiated by a call to :method:jobs/runNow with parameters specified, + // the two parameter maps are merged. If the same key is specified in + // `base_parameters` and in `run-now`, the value from `run-now` is used. Use + // [Task parameter variables] to set parameters containing information about + // job runs. + // + // If the notebook takes a parameter that is not specified in the job’s + // `base_parameters` or the `run-now` override parameters, the default value + // from the notebook is used.
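+ //
+ // Editor's sketch of the merge rule described above, with hypothetical
+ // parameter names: given
+ //
+ //     nt := NotebookTask{
+ //         NotebookPath:   "/Workspace/etl/ingest",
+ //         BaseParameters: map[string]string{"env": "dev", "date": "auto"},
+ //     }
+ //
+ // a run-now call with `"notebook_params": {"env": "prod"}` runs the
+ // notebook with env=prod (run-now wins) and date=auto (from the base).
+ //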
+ // + // Retrieve these parameters in a notebook using [dbutils.widgets.get]. + // + // The JSON representation of this field cannot exceed 1MB. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets + BaseParameters map[string]string `json:"base_parameters,omitempty"` + // The path of the notebook to be run in the Databricks workspace or remote + // repository. For notebooks stored in the Databricks workspace, the path + // must be absolute and begin with a slash. For notebooks stored in a remote + // repository, the path must be relative. This field is required. + NotebookPath string `json:"notebook_path"` + // Optional location type of the notebook. When set to `WORKSPACE`, the + // notebook will be retrieved from the local Databricks workspace. When set + // to `GIT`, the notebook will be retrieved from a Git repository defined in + // `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`: + // Notebook is located in Databricks workspace. * `GIT`: Notebook is located + // in cloud Git provider. + Source Source `json:"source,omitempty"` + // Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic + // SQL warehouses are NOT supported; please use serverless or pro SQL + // warehouses. + // + // Note that SQL warehouses only support SQL cells; if the notebook contains + // non-SQL cells, the run will fail. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NotebookTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NotebookTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Stores the catalog name, schema name, and the output schema expiration time +// for the clean room run. +type OutputSchemaInfo struct { + CatalogName string `json:"catalog_name,omitempty"` + // The expiration time for the output schema as a Unix timestamp in + // milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + + SchemaName string `json:"schema_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OutputSchemaInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OutputSchemaInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PauseStatus string + +const PauseStatusPaused PauseStatus = `PAUSED` + +const PauseStatusUnpaused PauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *PauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = PauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns PauseStatus to satisfy [pflag.Value] interface +func (f *PauseStatus) Type() string { + return "PauseStatus" +} + +// PerformanceTarget defines how performant (lower latency) or cost efficient +// the execution of a run on serverless compute should be. The performance mode on +// the job or pipeline should map to a performance setting that is passed to +// Cluster Manager (see cluster-common PerformanceTarget).
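+ //
+ // Editor's sketch: a caller can pin the mode per run via the RunNow
+ // request defined later in this file (the job ID is hypothetical):
+ //
+ //     req := RunNow{
+ //         JobId:             1234,
+ //         PerformanceTarget: PerformanceTargetPerformanceOptimized,
+ //     }
+ //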
+type PerformanceTarget string + +const PerformanceTargetCostOptimized PerformanceTarget = `COST_OPTIMIZED` + +const PerformanceTargetPerformanceOptimized PerformanceTarget = `PERFORMANCE_OPTIMIZED` + +// String representation for [fmt.Print] +func (f *PerformanceTarget) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PerformanceTarget) Set(v string) error { + switch v { + case `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: + *f = PerformanceTarget(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) + } +} + +// Type always returns PerformanceTarget to satisfy [pflag.Value] interface +func (f *PerformanceTarget) Type() string { + return "PerformanceTarget" +} + +type PeriodicTriggerConfiguration struct { + // The interval at which the trigger should run. + Interval int `json:"interval"` + // The unit of time for the interval. + Unit PeriodicTriggerConfigurationTimeUnit `json:"unit"` +} + +type PeriodicTriggerConfigurationTimeUnit string + +const PeriodicTriggerConfigurationTimeUnitDays PeriodicTriggerConfigurationTimeUnit = `DAYS` + +const PeriodicTriggerConfigurationTimeUnitHours PeriodicTriggerConfigurationTimeUnit = `HOURS` + +const PeriodicTriggerConfigurationTimeUnitWeeks PeriodicTriggerConfigurationTimeUnit = `WEEKS` + +// String representation for [fmt.Print] +func (f *PeriodicTriggerConfigurationTimeUnit) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PeriodicTriggerConfigurationTimeUnit) Set(v string) error { + switch v { + case `DAYS`, `HOURS`, `WEEKS`: + *f = PeriodicTriggerConfigurationTimeUnit(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAYS", "HOURS", "WEEKS"`, v) + } +} + +// Type always returns PeriodicTriggerConfigurationTimeUnit to satisfy [pflag.Value] interface +func (f *PeriodicTriggerConfigurationTimeUnit) Type() string { + return "PeriodicTriggerConfigurationTimeUnit" +} + +type PipelineParams struct { + // If true, triggers a full refresh on the delta live table. + FullRefresh bool `json:"full_refresh,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineParams) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineParams) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineTask struct { + // If true, triggers a full refresh on the delta live table. + FullRefresh bool `json:"full_refresh,omitempty"` + // The full name of the pipeline task to execute. + PipelineId string `json:"pipeline_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PythonPyPiLibrary struct { + // The name of the pypi package to install. An optional exact version + // specification is also supported. Examples: "simplejson" and + // "simplejson==3.8.0". + Package string `json:"package"` + // The repository where the package can be found. If not specified, the + // default pip index is used. 
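+ //
+ // Editor's note: every enum in this file implements the same
+ // Set/String/Type trio, so values can be validated from raw strings or
+ // bound as command-line flags. A minimal sketch:
+ //
+ //     var target PerformanceTarget
+ //     if err := target.Set("COST_OPTIMIZED"); err != nil {
+ //         // rejected: the value is not one of the allowed constants
+ //     }
+ //     // with spf13/pflag: flags.Var(&target, "performance-target", "usage")
+ //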
+ Repo string `json:"repo,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PythonPyPiLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PythonPyPiLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PythonWheelTask struct { + // Named entry point to use. If it does not exist in the metadata of the + // package, the function is executed from the package directly using + // `$packageName.$entryPoint()` + EntryPoint string `json:"entry_point"` + // Command-line parameters passed to Python wheel task in the form of + // `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if + // `parameters` is not null. + NamedParameters map[string]string `json:"named_parameters,omitempty"` + // Name of the package to execute + PackageName string `json:"package_name"` + // Command-line parameters passed to Python wheel task. Leave it empty if + // `named_parameters` is not null. + Parameters []string `json:"parameters,omitempty"` +} + +type QueueDetails struct { + // The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run + // was queued due to reaching the workspace limit of active task runs. * + // `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the + // per-job limit of concurrent job runs. * + // `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run was queued due to reaching + // the workspace limit of active run job tasks. + Code QueueDetailsCodeCode `json:"code,omitempty"` + // A descriptive message with the queuing details. This field is + // unstructured, and its exact format is subject to change. + Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueueDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueueDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The reason for queuing the run. * `ACTIVE_RUNS_LIMIT_REACHED`: The run was +// queued due to reaching the workspace limit of active task runs. * +// `MAX_CONCURRENT_RUNS_REACHED`: The run was queued due to reaching the per-job +// limit of concurrent job runs. * `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`: The run +// was queued due to reaching the workspace limit of active run job tasks. +type QueueDetailsCodeCode string + +// The run was queued due to reaching the workspace limit of active task runs. +const QueueDetailsCodeCodeActiveRunsLimitReached QueueDetailsCodeCode = `ACTIVE_RUNS_LIMIT_REACHED` + +// The run was queued due to reaching the workspace limit of active run job +// tasks. +const QueueDetailsCodeCodeActiveRunJobTasksLimitReached QueueDetailsCodeCode = `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED` + +// The run was queued due to reaching the per-job limit of concurrent job runs.
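+ //
+ // Editor's sketch of a wheel task using the named form; the package and
+ // entry point are hypothetical. Parameters and NamedParameters are
+ // mutually exclusive per the docs above:
+ //
+ //     task := PythonWheelTask{
+ //         PackageName:     "my_package",
+ //         EntryPoint:      "main", // falls back to my_package.main() if absent from metadata
+ //         NamedParameters: map[string]string{"name": "task", "data": "dbfs:/path/to/data.json"},
+ //     }
+ //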
+const QueueDetailsCodeCodeMaxConcurrentRunsReached QueueDetailsCodeCode = `MAX_CONCURRENT_RUNS_REACHED` + +// String representation for [fmt.Print] +func (f *QueueDetailsCodeCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueueDetailsCodeCode) Set(v string) error { + switch v { + case `ACTIVE_RUNS_LIMIT_REACHED`, `ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED`, `MAX_CONCURRENT_RUNS_REACHED`: + *f = QueueDetailsCodeCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_RUNS_LIMIT_REACHED", "ACTIVE_RUN_JOB_TASKS_LIMIT_REACHED", "MAX_CONCURRENT_RUNS_REACHED"`, v) + } +} + +// Type always returns QueueDetailsCodeCode to satisfy [pflag.Value] interface +func (f *QueueDetailsCodeCode) Type() string { + return "QueueDetailsCodeCode" +} + +type QueueSettings struct { + // If true, enable queueing for the job. This is a required field. + Enabled bool `json:"enabled"` +} + +type RCranLibrary struct { + // The name of the CRAN package to install. + Package string `json:"package"` + // The repository where the package can be found. If not specified, the + // default CRAN repo is used. + Repo string `json:"repo,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RCranLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RCranLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RepairHistoryItem struct { + // The end time of the (repaired) run. + EndTime int64 `json:"end_time,omitempty"` + // The ID of the repair. Only returned for the items that represent a repair + // in `repair_history`. + Id int64 `json:"id,omitempty"` + // The start time of the (repaired) run. + StartTime int64 `json:"start_time,omitempty"` + // Deprecated. Please use the `status` field instead. + State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` + // The run IDs of the task runs that ran as part of this repair history + // item. + TaskRunIds []int64 `json:"task_run_ids,omitempty"` + // The repair history item type. Indicates whether a run is the original run + // or a repair run. + Type RepairHistoryItemType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepairHistoryItem) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepairHistoryItem) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The repair history item type. Indicates whether a run is the original run or +// a repair run. 
+type RepairHistoryItemType string + +const RepairHistoryItemTypeOriginal RepairHistoryItemType = `ORIGINAL` + +const RepairHistoryItemTypeRepair RepairHistoryItemType = `REPAIR` + +// String representation for [fmt.Print] +func (f *RepairHistoryItemType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RepairHistoryItemType) Set(v string) error { + switch v { + case `ORIGINAL`, `REPAIR`: + *f = RepairHistoryItemType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ORIGINAL", "REPAIR"`, v) + } +} + +// Type always returns RepairHistoryItemType to satisfy [pflag.Value] interface +func (f *RepairHistoryItemType) Type() string { + return "RepairHistoryItemType" +} + +type RepairRun struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]` + DbtCommands []string `json:"dbt_commands,omitempty"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + JarParams []string `json:"jar_params,omitempty"` + // Job-level parameters used in the run. For example `"param": + // "overriding_val"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + // The ID of the latest repair. This parameter is not required when + // repairing a run for the first time, but must be provided on subsequent + // requests to repair the same run. + LatestRepairId int64 `json:"latest_repair_id,omitempty"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]string `json:"notebook_params,omitempty"` + // Controls whether the pipeline should perform a full refresh + PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` + + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // the Python file as command-line parameters.
If specified upon `run-now`, it + // would overwrite the parameters specified in the job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []string `json:"python_params,omitempty"` + // If true, repair all failed tasks. Only one of `rerun_tasks` or + // `rerun_all_failed_tasks` can be used. + RerunAllFailedTasks bool `json:"rerun_all_failed_tasks,omitempty"` + // If true, repair all tasks that depend on the tasks in `rerun_tasks`, even + // if they were previously successful. Can be also used in combination with + // `rerun_all_failed_tasks`. + RerunDependentTasks bool `json:"rerun_dependent_tasks,omitempty"` + // The task keys of the task runs to repair. + RerunTasks []string `json:"rerun_tasks,omitempty"` + // The job run ID of the run to repair. The run must not be in progress. + RunId int64 `json:"run_id"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // the spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in the job setting. + // The JSON representation of this field (for example + // `{"spark_submit_params":["--class","org.apache.spark.examples.SparkPi"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. + SqlParams map[string]string `json:"sql_params,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepairRun) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepairRun) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Run repair was initiated. +type RepairRunResponse struct { + // The ID of the repair. Must be provided in subsequent repairs using the + // `latest_repair_id` field to ensure sequential repairs. + RepairId int64 `json:"repair_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepairRunResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepairRunResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ResetJob struct { + // The canonical identifier of the job to reset. This field is required. + JobId int64 `json:"job_id"` + // The new settings of the job. These settings completely replace the old + // settings.
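+ //
+ // Editor's sketch of the repair sequencing described above: the first
+ // repair omits latest_repair_id; each later repair must echo the
+ // repair_id returned by the previous one. `repairRun` is a hypothetical
+ // stand-in for the transport call:
+ //
+ //     first := RepairRun{RunId: runID, RerunAllFailedTasks: true}
+ //     resp, err := repairRun(ctx, first) // returns a RepairRunResponse
+ //     // ... handle err ...
+ //     second := RepairRun{
+ //         RunId:          runID,
+ //         RerunTasks:     []string{"ingest"}, // hypothetical task key
+ //         LatestRepairId: resp.RepairId,      // required on every repair after the first
+ //     }
+ //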
+ // + // Changes to the field `JobBaseSettings.timeout_seconds` are applied to + // active runs. Changes to other fields are applied to future runs only. + NewSettings JobSettings `json:"new_settings"` +} + +type ResetResponse struct { +} + +type ResolvedConditionTaskValues struct { + Left string `json:"left,omitempty"` + + Right string `json:"right,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResolvedConditionTaskValues) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResolvedConditionTaskValues) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ResolvedDbtTaskValues struct { + Commands []string `json:"commands,omitempty"` +} + +type ResolvedNotebookTaskValues struct { + BaseParameters map[string]string `json:"base_parameters,omitempty"` +} + +type ResolvedParamPairValues struct { + Parameters map[string]string `json:"parameters,omitempty"` +} + +type ResolvedPythonWheelTaskValues struct { + NamedParameters map[string]string `json:"named_parameters,omitempty"` + + Parameters []string `json:"parameters,omitempty"` +} + +type ResolvedRunJobTaskValues struct { + JobParameters map[string]string `json:"job_parameters,omitempty"` + + Parameters map[string]string `json:"parameters,omitempty"` +} + +type ResolvedStringParamsValues struct { + Parameters []string `json:"parameters,omitempty"` +} + +type ResolvedValues struct { + ConditionTask *ResolvedConditionTaskValues `json:"condition_task,omitempty"` + + DbtTask *ResolvedDbtTaskValues `json:"dbt_task,omitempty"` + + NotebookTask *ResolvedNotebookTaskValues `json:"notebook_task,omitempty"` + + PythonWheelTask *ResolvedPythonWheelTaskValues `json:"python_wheel_task,omitempty"` + + RunJobTask *ResolvedRunJobTaskValues `json:"run_job_task,omitempty"` + + SimulationTask *ResolvedParamPairValues `json:"simulation_task,omitempty"` + + SparkJarTask *ResolvedStringParamsValues `json:"spark_jar_task,omitempty"` + + SparkPythonTask *ResolvedStringParamsValues `json:"spark_python_task,omitempty"` + + SparkSubmitTask *ResolvedStringParamsValues `json:"spark_submit_task,omitempty"` + + SqlTask *ResolvedParamPairValues `json:"sql_task,omitempty"` +} + +// Run was retrieved successfully +type Run struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` > 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber int `json:"attempt_number,omitempty"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration int64 `json:"cleanup_duration,omitempty"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. 
+ ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` + // A snapshot of the job’s cluster specification when this run was + // created. + ClusterSpec *ClusterSpec `json:"cluster_spec,omitempty"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName string `json:"creator_user_name,omitempty"` + // Description of the run + Description string `json:"description,omitempty"` + // effective_performance_target is the actual performance target used by the + // run during execution. effective_performance_target can differ from + // performance_target depending on whether the job was eligible to be + // cost-optimized (e.g. contains at least 1 serverless task) or whether the + // value was specifically overridden for the run (e.g. RunNow). + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` + // The time at which this run ended in epoch milliseconds (milliseconds + // since 1/1/1970 UTC). This field is set to 0 if the job is still running. + EndTime int64 `json:"end_time,omitempty"` + // The time in milliseconds it took to execute the commands in the JAR or + // notebook until they completed, failed, timed out, were cancelled, or + // encountered an unexpected error. The duration of a task run is the sum of + // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. + // The `execution_duration` field is set to 0 for multitask job runs. The + // total duration of a multitask job run is the value of the `run_duration` + // field. + ExecutionDuration int64 `json:"execution_duration,omitempty"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `json:"git_source,omitempty"` + // Indicates if the run has more sub-resources (`tasks`, `job_clusters`) + // that are not shown. They can be accessed via :method:jobs/getrun + // endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests + // with `expand_tasks=true`. + HasMore bool `json:"has_more,omitempty"` + // Only populated by for-each iterations. The parent for-each task is + // located in the tasks array. + Iterations []RunTask `json:"iterations,omitempty"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. If more than 100 + // job clusters are available, you can paginate through them using + // :method:jobs/getrun. + JobClusters []JobCluster `json:"job_clusters,omitempty"` + // The canonical identifier of the job that contains this run. + JobId int64 `json:"job_id,omitempty"` + // Job-level parameters used in the run + JobParameters []JobParameter `json:"job_parameters,omitempty"` + // ID of the job run that this run belongs to. For legacy and single-task + // job runs the field is populated with the job run ID.
For task runs, the + // field is populated with the ID of the job run that the task run belongs + // to. + JobRunId int64 `json:"job_run_id,omitempty"` + // A token that can be used to list the next page of sub-resources. + NextPageToken string `json:"next_page_token,omitempty"` + // A unique identifier for this job run. This is set to the same value as + // `run_id`. + NumberInJob int64 `json:"number_in_job,omitempty"` + // If this run is a retry of a prior run attempt, this field contains the + // run_id of the original attempt; otherwise, it is the same as the run_id. + OriginalAttemptRunId int64 `json:"original_attempt_run_id,omitempty"` + // The parameters used for this run. + OverridingParameters *RunParameters `json:"overriding_parameters,omitempty"` + // The time in milliseconds that the run has spent in the queue. + QueueDuration int64 `json:"queue_duration,omitempty"` + // The repair history of the run. + RepairHistory []RepairHistoryItem `json:"repair_history,omitempty"` + // The time in milliseconds it took the job run and all of its repairs to + // finish. + RunDuration int64 `json:"run_duration,omitempty"` + // The canonical identifier of the run. This ID is unique across all runs of + // all jobs. + RunId int64 `json:"run_id,omitempty"` + // An optional name for the run. The maximum length is 4096 bytes in UTF-8 + // encoding. + RunName string `json:"run_name,omitempty"` + // The URL to the detail page of the run. + RunPageUrl string `json:"run_page_url,omitempty"` + // The type of a run. * `JOB_RUN`: Normal job run. A run created with + // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with + // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with + // :method:jobs/submit. + // + // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow + RunType RunType `json:"run_type,omitempty"` + // The cron schedule that triggered this run if it was triggered by the + // periodic scheduler. + Schedule *CronSchedule `json:"schedule,omitempty"` + // The time in milliseconds it took to set up the cluster. For runs that run + // on new clusters this is the cluster creation time, for runs that run on + // existing clusters this time should be very short. The duration of a task + // run is the sum of the `setup_duration`, `execution_duration`, and the + // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask + // job runs. The total duration of a multitask job run is the value of the + // `run_duration` field. + SetupDuration int64 `json:"setup_duration,omitempty"` + // The time at which this run was started in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). This may not be the time when the job + // task starts executing, for example, if the job is scheduled to run on a + // new cluster, this is the time the cluster creation call is issued. + StartTime int64 `json:"start_time,omitempty"` + // Deprecated. Please use the `status` field instead. + State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` + // The list of tasks performed by the run. Each task has its own `run_id` + // which you can use to call `JobsGetOutput` to retrieve the run results. If + // more than 100 tasks are available, you can paginate through them using + // :method:jobs/getrun. Use the `next_page_token` field at the object root + // to determine if more results are available.
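+ //
+ // Editor's sketch of collecting all tasks of a large run via the
+ // root-level next_page_token; `getRun` is a hypothetical stand-in for the
+ // :method:jobs/getrun call taking a page token:
+ //
+ //     run, err := getRun(ctx, runID, "")
+ //     // ... handle err ...
+ //     tasks := run.Tasks
+ //     for run.NextPageToken != "" {
+ //         run, err = getRun(ctx, runID, run.NextPageToken)
+ //         // ... handle err ...
+ //         tasks = append(tasks, run.Tasks...)
+ //     }
+ //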
+ Tasks []RunTask `json:"tasks,omitempty"` + // The type of trigger that fired this run. + // + // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron + // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This + // occurs when you trigger a single run on demand through the UI or the API. * + // `RETRY`: Indicates a run that is triggered as a retry of a previously + // failed run. This occurs when you request to re-run the job in case of + // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run + // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file + // arrival. * `TABLE`: Indicates a run that is triggered by a table update. + // * `CONTINUOUS_RESTART`: Indicates a run created by the user to manually + // restart a continuous job run. + Trigger TriggerType `json:"trigger,omitempty"` + // Additional details about what triggered the run + TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Run) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Run) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RunConditionTask struct { + // The left operand of the condition task. Can be either a string value or a + // job state or parameter reference. + Left string `json:"left"` + // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their + // operands. This means that `“12.0” == “12”` will evaluate to + // `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, + // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their + // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” + // >= “12”` will evaluate to `false`. + // + // The boolean comparison to task values can be implemented with operators + // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it + // will be serialized to `“true”` or `“false”` for the comparison. + Op ConditionTaskOp `json:"op"` + // The condition expression evaluation result. Filled in if the task was + // successfully completed. Can be `"true"` or `"false"` + Outcome string `json:"outcome,omitempty"` + // The right operand of the condition task. Can be either a string value or + // a job state or parameter reference. + Right string `json:"right"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunConditionTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunConditionTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RunForEachTask struct { + // An optional maximum allowed number of concurrent runs of the task. Set + // this value if you want to be able to execute multiple runs of the task + // concurrently. + Concurrency int `json:"concurrency,omitempty"` + // Array for the task to iterate on. This can be a JSON string or a reference to + // an array parameter. + Inputs string `json:"inputs"` + // Read only field.
Populated for GetRun and ListRuns RPC calls and stores + // the execution stats of a For each task + Stats *ForEachStats `json:"stats,omitempty"` + // Configuration for the task that will be run for each element in the array + Task Task `json:"task"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunForEachTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunForEachTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// An optional value indicating the condition that determines whether the task +// should be run once its dependencies have been completed. When omitted, +// defaults to `ALL_SUCCESS`. +// +// Possible values are: * `ALL_SUCCESS`: All dependencies have executed and +// succeeded * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded * +// `NONE_FAILED`: None of the dependencies have failed and at least one was +// executed * `ALL_DONE`: All dependencies have been completed * +// `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All +// dependencies have failed +type RunIf string + +// All dependencies have been completed +const RunIfAllDone RunIf = `ALL_DONE` + +// All dependencies have failed +const RunIfAllFailed RunIf = `ALL_FAILED` + +// All dependencies have executed and succeeded +const RunIfAllSuccess RunIf = `ALL_SUCCESS` + +// At least one dependency failed +const RunIfAtLeastOneFailed RunIf = `AT_LEAST_ONE_FAILED` + +// At least one dependency has succeeded +const RunIfAtLeastOneSuccess RunIf = `AT_LEAST_ONE_SUCCESS` + +// None of the dependencies have failed and at least one was executed +const RunIfNoneFailed RunIf = `NONE_FAILED` + +// String representation for [fmt.Print] +func (f *RunIf) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunIf) Set(v string) error { + switch v { + case `ALL_DONE`, `ALL_FAILED`, `ALL_SUCCESS`, `AT_LEAST_ONE_FAILED`, `AT_LEAST_ONE_SUCCESS`, `NONE_FAILED`: + *f = RunIf(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL_DONE", "ALL_FAILED", "ALL_SUCCESS", "AT_LEAST_ONE_FAILED", "AT_LEAST_ONE_SUCCESS", "NONE_FAILED"`, v) + } +} + +// Type always returns RunIf to satisfy [pflag.Value] interface +func (f *RunIf) Type() string { + return "RunIf" +} + +type RunJobOutput struct { + // The run id of the triggered job run + RunId int64 `json:"run_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunJobOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunJobOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RunJobTask struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]` + DbtCommands []string `json:"dbt_commands,omitempty"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs.
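+ //
+ // Editor's sketch: a typical use of `ALL_DONE` is a cleanup task that runs
+ // whether or not upstream tasks succeeded. The Task fields shown here
+ // (TaskKey, RunIf) are assumed from the stable jobs API:
+ //
+ //     cleanup := Task{
+ //         TaskKey: "cleanup",
+ //         RunIf:   RunIfAllDone,
+ //     }
+ //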
+ // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + JarParams []string `json:"jar_params,omitempty"` + // ID of the job to trigger. + JobId int64 `json:"job_id"` + // Job-level parameters used to trigger the job. + JobParameters map[string]string `json:"job_parameters,omitempty"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]string `json:"notebook_params,omitempty"` + // Controls whether the pipeline should perform a full refresh + PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` + + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // the Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in the job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []string `json:"python_params,omitempty"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // the spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in the job setting. + // The JSON representation of this field (for example + // `{"spark_submit_params":["--class","org.apache.spark.examples.SparkPi"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters.
+ SqlParams map[string]string `json:"sql_params,omitempty"` +} + +// A value indicating the run's lifecycle state. The possible values are: * +// `QUEUED`: The run is queued. * `PENDING`: The run is waiting to be executed +// while the cluster and execution context are being prepared. * `RUNNING`: The +// task of this run is being executed. * `TERMINATING`: The task of this run has +// completed, and the cluster and execution context are being cleaned up. * +// `TERMINATED`: The task of this run has completed, and the cluster and +// execution context have been cleaned up. This state is terminal. * `SKIPPED`: +// This run was aborted because a previous run of the same job was already +// active. This state is terminal. * `INTERNAL_ERROR`: An exceptional state that +// indicates a failure in the Jobs service, such as network failure over a long +// period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the +// Jobs service terminates the cluster as soon as possible. This state is +// terminal. * `BLOCKED`: The run is blocked on an upstream dependency. * +// `WAITING_FOR_RETRY`: The run is waiting for a retry. +type RunLifeCycleState string + +// The run is blocked on an upstream dependency. +const RunLifeCycleStateBlocked RunLifeCycleState = `BLOCKED` + +// An exceptional state that indicates a failure in the Jobs service, such as +// network failure over a long period. If a run on a new cluster ends in the +// `INTERNAL_ERROR` state, the Jobs service terminates the cluster as soon as +// possible. This state is terminal. +const RunLifeCycleStateInternalError RunLifeCycleState = `INTERNAL_ERROR` + +// The run is waiting to be executed while the cluster and execution context are +// being prepared. +const RunLifeCycleStatePending RunLifeCycleState = `PENDING` + +// The run is queued. +const RunLifeCycleStateQueued RunLifeCycleState = `QUEUED` + +// The task of this run is being executed. +const RunLifeCycleStateRunning RunLifeCycleState = `RUNNING` + +// This run was aborted because a previous run of the same job was already +// active. This state is terminal. +const RunLifeCycleStateSkipped RunLifeCycleState = `SKIPPED` + +// The task of this run has completed, and the cluster and execution context +// have been cleaned up. This state is terminal. +const RunLifeCycleStateTerminated RunLifeCycleState = `TERMINATED` + +// The task of this run has completed, and the cluster and execution context are +// being cleaned up. +const RunLifeCycleStateTerminating RunLifeCycleState = `TERMINATING` + +// The run is waiting for a retry. +const RunLifeCycleStateWaitingForRetry RunLifeCycleState = `WAITING_FOR_RETRY` + +// String representation for [fmt.Print] +func (f *RunLifeCycleState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunLifeCycleState) Set(v string) error { + switch v { + case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: + *f = RunLifeCycleState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "SKIPPED", "TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) + } +} + +// Type always returns RunLifeCycleState to satisfy [pflag.Value] interface +func (f *RunLifeCycleState) Type() string { + return "RunLifeCycleState" +} + +// The current state of the run. 
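+ //
+ // Editor's sketch grounded in the state docs above: only `TERMINATED`,
+ // `SKIPPED`, and `INTERNAL_ERROR` are terminal, so a poller can stop on
+ //
+ //     func isTerminal(s RunLifeCycleState) bool {
+ //         switch s {
+ //         case RunLifeCycleStateTerminated, RunLifeCycleStateSkipped, RunLifeCycleStateInternalError:
+ //             return true
+ //         }
+ //         return false
+ //     }
+ //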
+type RunLifecycleStateV2State string + +const RunLifecycleStateV2StateBlocked RunLifecycleStateV2State = `BLOCKED` + +const RunLifecycleStateV2StatePending RunLifecycleStateV2State = `PENDING` + +const RunLifecycleStateV2StateQueued RunLifecycleStateV2State = `QUEUED` + +const RunLifecycleStateV2StateRunning RunLifecycleStateV2State = `RUNNING` + +const RunLifecycleStateV2StateTerminated RunLifecycleStateV2State = `TERMINATED` + +const RunLifecycleStateV2StateTerminating RunLifecycleStateV2State = `TERMINATING` + +// String representation for [fmt.Print] +func (f *RunLifecycleStateV2State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunLifecycleStateV2State) Set(v string) error { + switch v { + case `BLOCKED`, `PENDING`, `QUEUED`, `RUNNING`, `TERMINATED`, `TERMINATING`: + *f = RunLifecycleStateV2State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "PENDING", "QUEUED", "RUNNING", "TERMINATED", "TERMINATING"`, v) + } +} + +// Type always returns RunLifecycleStateV2State to satisfy [pflag.Value] interface +func (f *RunLifecycleStateV2State) Type() string { + return "RunLifecycleStateV2State" +} + +type RunNow struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]` + DbtCommands []string `json:"dbt_commands,omitempty"` + // An optional token to guarantee the idempotency of job run requests. If a + // run with the provided token already exists, the request does not create a + // new run but returns the ID of the existing run instead. If a run with the + // provided token is deleted, an error is returned. + // + // If you specify the idempotency token, upon failure you can retry until + // the request succeeds. Databricks guarantees that exactly one run is + // launched with that idempotency token. + // + // This token must have at most 64 characters. + // + // For more information, see [How to ensure idempotency for jobs]. + // + // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html + IdempotencyToken string `json:"idempotency_token,omitempty"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + JarParams []string `json:"jar_params,omitempty"` + // The ID of the job to be executed + JobId int64 `json:"job_id"` + // Job-level parameters used in the run. For example `"param": + // "overriding_val"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters.
+ // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]string `json:"notebook_params,omitempty"` + // A list of task keys to run inside of the job. If this field is not + // provided, all tasks in the job will be run. + Only []string `json:"only,omitempty"` + // PerformanceTarget defines how performant or cost efficient the execution + // of a run on serverless compute should be. For a RunNow request, the run will + // execute with these settings instead of the ones defined in the job. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` + // Controls whether the pipeline should perform a full refresh + PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` + + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // the Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in the job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []string `json:"python_params,omitempty"` + // The queue settings of the run. + Queue *QueueSettings `json:"queue,omitempty"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // the spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in the job setting. + // The JSON representation of this field (for example + // `{"spark_submit_params":["--class","org.apache.spark.examples.SparkPi"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanji, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters.
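+ //
+ // Editor's sketch of an idempotent, queued trigger using the fields
+ // documented above (the job ID and token are hypothetical):
+ //
+ //     req := RunNow{
+ //         JobId:            1234,
+ //         IdempotencyToken: "deploy-2025-02-11", // at most 64 characters; retries return the same run
+ //         JobParameters:    map[string]string{"param": "overriding_val"},
+ //         Queue:            &QueueSettings{Enabled: true},
+ //     }
+ //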
+ SqlParams map[string]string `json:"sql_params,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunNow) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunNow) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Run was started successfully. +type RunNowResponse struct { + // A unique identifier for this job run. This is set to the same value as + // `run_id`. + NumberInJob int64 `json:"number_in_job,omitempty"` + // The globally unique ID of the newly triggered run. + RunId int64 `json:"run_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunNowResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunNowResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Run output was retrieved successfully. +type RunOutput struct { + // The output of a clean rooms notebook task, if available + CleanRoomsNotebookOutput *CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput `json:"clean_rooms_notebook_output,omitempty"` + // The output of a dbt task, if available. + DbtOutput *DbtOutput `json:"dbt_output,omitempty"` + // An error message indicating why a task failed or why output is not + // available. The message is unstructured, and its exact format is subject + // to change. + Error string `json:"error,omitempty"` + // If there was an error executing the run, this field contains any + // available stack traces. + ErrorTrace string `json:"error_trace,omitempty"` + + Info string `json:"info,omitempty"` + // The output from tasks that write to standard streams (stdout/stderr) such + // as spark_jar_task, spark_python_task, python_wheel_task. + // + // It's not supported for the notebook_task, pipeline_task or + // spark_submit_task. + // + // Databricks restricts this API to return the last 5 MB of these logs. + Logs string `json:"logs,omitempty"` + // Whether the logs are truncated. + LogsTruncated bool `json:"logs_truncated,omitempty"` + // All details of the run except for its output. + Metadata *Run `json:"metadata,omitempty"` + // The output of a notebook task, if available. A notebook task that + // terminates (either successfully or with a failure) without calling + // `dbutils.notebook.exit()` is considered to have an empty output. This + // field is set but its result value is empty. Databricks restricts this API + // to return the first 5 MB of the output. To return a larger result, use + // the [ClusterLogConf] field to configure log storage for the job cluster. + // + // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf + NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` + // The output of a run job task, if available + RunJobOutput *RunJobOutput `json:"run_job_output,omitempty"` + // The output of a SQL task, if available. + SqlOutput *SqlOutput `json:"sql_output,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RunParameters struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]` + DbtCommands []string `json:"dbt_commands,omitempty"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`.
The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + JarParams []string `json:"jar_params,omitempty"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]string `json:"notebook_params,omitempty"` + // Controls whether the pipeline should perform a full refresh + PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` + + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []string `json:"python_params,omitempty"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in job setting. + // The JSON representation of this field (for example + // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. 
+ // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. + SqlParams map[string]string `json:"sql_params,omitempty"` +} + +// A value indicating the run's result. The possible values are: * `SUCCESS`: +// The task completed successfully. * `FAILED`: The task completed with an +// error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * +// `CANCELED`: The run was canceled at user request. * +// `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum +// concurrent runs were reached. * `EXCLUDED`: The run was skipped because the +// necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run +// completed successfully with some failures; leaf tasks were successful. * +// `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * +// `UPSTREAM_CANCELED`: The run was skipped because an upstream task was +// canceled. * `DISABLED`: The run was skipped because it was disabled +// explicitly by the user. +type RunResultState string + +// The run was canceled at user request. +const RunResultStateCanceled RunResultState = `CANCELED` + +// The run was skipped because it was disabled explicitly by the user. +const RunResultStateDisabled RunResultState = `DISABLED` + +// The run was skipped because the necessary conditions were not met. +const RunResultStateExcluded RunResultState = `EXCLUDED` + +// The task completed with an error. +const RunResultStateFailed RunResultState = `FAILED` + +// The run was skipped because the maximum concurrent runs were reached. +const RunResultStateMaximumConcurrentRunsReached RunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED` + +// The task completed successfully. +const RunResultStateSuccess RunResultState = `SUCCESS` + +// The job run completed successfully with some failures; leaf tasks were +// successful. +const RunResultStateSuccessWithFailures RunResultState = `SUCCESS_WITH_FAILURES` + +// The run was stopped after reaching the timeout. +const RunResultStateTimedout RunResultState = `TIMEDOUT` + +// The run was skipped because an upstream task was canceled. +const RunResultStateUpstreamCanceled RunResultState = `UPSTREAM_CANCELED` + +// The run was skipped because of an upstream failure. +const RunResultStateUpstreamFailed RunResultState = `UPSTREAM_FAILED` + +// String representation for [fmt.Print] +func (f *RunResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunResultState) Set(v string) error { + switch v { + case `CANCELED`, `DISABLED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_FAILED`: + *f = RunResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_FAILED"`, v) + } +} + +// Type always returns RunResultState to satisfy [pflag.Value] interface +func (f *RunResultState) Type() string { + return "RunResultState" +} + +// The current state of the run. +type RunState struct { + // A value indicating the run's current lifecycle state. 
This field is + // always available in the response. + LifeCycleState RunLifeCycleState `json:"life_cycle_state,omitempty"` + // The reason indicating why the run was queued. + QueueReason string `json:"queue_reason,omitempty"` + // A value indicating the run's result. This field is only available for + // terminal lifecycle states. + ResultState RunResultState `json:"result_state,omitempty"` + // A descriptive message for the current state. This field is unstructured, + // and its exact format is subject to change. + StateMessage string `json:"state_message,omitempty"` + // A value indicating whether a run was canceled manually by a user or by + // the scheduler because the run timed out. + UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunState) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunState) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The current status of the run +type RunStatus struct { + // If the run was queued, details about the reason for queuing the run. + QueueDetails *QueueDetails `json:"queue_details,omitempty"` + // The current state of the run. + State RunLifecycleStateV2State `json:"state,omitempty"` + // If the run is in a TERMINATING or TERMINATED state, details about the + // reason for terminating the run. + TerminationDetails *TerminationDetails `json:"termination_details,omitempty"` +} + +// Used when outputting a child run, in GetRun or ListRuns. +type RunTask struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` > 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber int `json:"attempt_number,omitempty"` + // The task runs a [clean rooms] notebook when the + // `clean_rooms_notebook_task` field is present. + // + // [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + CleanRoomsNotebookTask *CleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration int64 `json:"cleanup_duration,omitempty"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. + ClusterInstance *ClusterInstance `json:"cluster_instance,omitempty"` + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. + ConditionTask *RunConditionTask `json:"condition_task,omitempty"` + // The task runs one or more dbt commands when the `dbt_task` field is + // present. 
The dbt task requires both Databricks SQL and the ability to use a
+ // serverless or a pro SQL warehouse.
+ DbtTask *DbtTask `json:"dbt_task,omitempty"`
+ // An optional array of objects specifying the dependency graph of the task.
+ // All tasks specified in this field must complete successfully before
+ // executing this task. The key is `task_key`, and the value is the name
+ // assigned to the dependent task.
+ DependsOn []TaskDependency `json:"depends_on,omitempty"`
+ // An optional description for this task.
+ Description string `json:"description,omitempty"`
+ // Denotes whether the task was disabled by the user. Disabled tasks do not
+ // execute and are immediately skipped as soon as they are unblocked.
+ Disabled bool `json:"disabled,omitempty"`
+ // effective_performance_target is the actual performance target used by the
+ // run during execution. effective_performance_target can differ from
+ // performance_target depending on whether the job was eligible to be
+ // cost-optimized (e.g., it contains at least one serverless task) or
+ // whether an override was provided for the run (e.g., RunNow).
+ EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"`
+ // An optional set of email addresses notified when the task run begins or
+ // completes. The default behavior is to not send any emails.
+ EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
+ // The time at which this run ended in epoch milliseconds (milliseconds
+ // since 1/1/1970 UTC). This field is set to 0 if the job is still running.
+ EndTime int64 `json:"end_time,omitempty"`
+ // The key that references an environment spec in a job. This field is
+ // required for Python script, Python wheel, and dbt tasks when using
+ // serverless compute.
+ EnvironmentKey string `json:"environment_key,omitempty"`
+ // The time in milliseconds it took to execute the commands in the JAR or
+ // notebook until they completed, failed, timed out, were cancelled, or
+ // encountered an unexpected error. The duration of a task run is the sum of
+ // the `setup_duration`, `execution_duration`, and the `cleanup_duration`.
+ // The `execution_duration` field is set to 0 for multitask job runs. The
+ // total duration of a multitask job run is the value of the `run_duration`
+ // field.
+ ExecutionDuration int64 `json:"execution_duration,omitempty"`
+ // If existing_cluster_id, the ID of an existing cluster that is used for
+ // all runs. When running jobs or tasks on an existing cluster, you may need
+ // to manually restart the cluster if it stops responding. We suggest
+ // running jobs and tasks on new clusters for greater reliability.
+ ExistingClusterId string `json:"existing_cluster_id,omitempty"`
+ // The task executes a nested task for every input provided when the
+ // `for_each_task` field is present.
+ ForEachTask *RunForEachTask `json:"for_each_task,omitempty"`
+ // An optional specification for a remote Git repository containing the
+ // source code used by tasks. Version-controlled source code is supported by
+ // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set,
+ // these tasks retrieve the file from the remote repository by default.
+ // However, this behavior can be overridden by setting `source` to
+ // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only
+ // version-controlled sources. If dbt or SQL File tasks are used,
+ // `git_source` must be defined on the job.
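+ //
+ // Illustrative sketch of the `WORKSPACE` override described above (the
+ // NotebookTask field names are assumptions from the referenced type, and
+ // the path is a placeholder):
+ //
+ //	nb := &NotebookTask{
+ //		NotebookPath: "/Workspace/Users/jane@example.com/etl",
+ //		Source:       SourceWorkspace, // fetch from the workspace even when git_source is set
+ //	}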
+ GitSource *GitSource `json:"git_source,omitempty"`
+ // If job_cluster_key, this task is executed reusing the cluster specified
+ // in `job.settings.job_clusters`.
+ JobClusterKey string `json:"job_cluster_key,omitempty"`
+ // An optional list of libraries to be installed on the cluster. The default
+ // value is an empty list.
+ Libraries []Library `json:"libraries,omitempty"`
+ // If new_cluster, a description of a new cluster that is created for each
+ // run.
+ NewCluster *ClusterSpec `json:"new_cluster,omitempty"`
+ // The task runs a notebook when the `notebook_task` field is present.
+ NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
+ // Optional notification settings that are used when sending notifications
+ // to each of the `email_notifications` and `webhook_notifications` for this
+ // task run.
+ NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"`
+ // The task triggers a pipeline update when the `pipeline_task` field is
+ // present. Only pipelines configured to use triggered mode are supported.
+ PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+ // The task runs a Python wheel when the `python_wheel_task` field is
+ // present.
+ PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
+ // The time in milliseconds that the run has spent in the queue.
+ QueueDuration int64 `json:"queue_duration,omitempty"`
+ // Parameter values including resolved references.
+ ResolvedValues *ResolvedValues `json:"resolved_values,omitempty"`
+ // The time in milliseconds it took the job run and all of its repairs to
+ // finish.
+ RunDuration int64 `json:"run_duration,omitempty"`
+ // The ID of the task run.
+ RunId int64 `json:"run_id,omitempty"`
+ // An optional value indicating the condition that determines whether the
+ // task should be run once its dependencies have been completed. When
+ // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of
+ // possible values.
+ RunIf RunIf `json:"run_if,omitempty"`
+ // The task triggers another job when the `run_job_task` field is present.
+ RunJobTask *RunJobTask `json:"run_job_task,omitempty"`
+
+ RunPageUrl string `json:"run_page_url,omitempty"`
+ // The time in milliseconds it took to set up the cluster. For runs that run
+ // on new clusters this is the cluster creation time; for runs that run on
+ // existing clusters this time should be very short. The duration of a task
+ // run is the sum of the `setup_duration`, `execution_duration`, and the
+ // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask
+ // job runs. The total duration of a multitask job run is the value of the
+ // `run_duration` field.
+ SetupDuration int64 `json:"setup_duration,omitempty"`
+ // The task runs a JAR when the `spark_jar_task` field is present.
+ SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
+ // The task runs a Python file when the `spark_python_task` field is
+ // present.
+ SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
+ // (Legacy) The task runs the spark-submit script when the
+ // `spark_submit_task` field is present. This task can run only on new
+ // clusters and is not compatible with serverless compute.
+ //
+ // In the `new_cluster` specification, `libraries` and `spark_conf` are not
+ // supported. Instead, use `--jars` and `--py-files` to add Java and Python
+ // libraries and `--conf` to set the Spark configurations.
+ // + // `master`, `deploy-mode`, and `executor-cores` are automatically + // configured by Databricks; you _cannot_ specify them in parameters. + // + // By default, the Spark submit job uses all available memory (excluding + // reserved memory for Databricks services). You can set `--driver-memory`, + // and `--executor-memory` to a smaller value to leave some room for + // off-heap usage. + // + // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 + // paths. + SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. + SqlTask *SqlTask `json:"sql_task,omitempty"` + // The time at which this run was started in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). This may not be the time when the job + // task starts executing, for example, if the job is scheduled to run on a + // new cluster, this is the time the cluster creation call is issued. + StartTime int64 `json:"start_time,omitempty"` + // Deprecated. Please use the `status` field instead. + State *RunState `json:"state,omitempty"` + // The current status of the run + Status *RunStatus `json:"status,omitempty"` + // A unique name for the task. This field is used to refer to this task from + // other tasks. This field is required and must be unique within its parent + // job. On Update or Reset, this field is used to reference the tasks to be + // updated or reset. + TaskKey string `json:"task_key"` + // An optional timeout applied to each run of this job task. A value of `0` + // means no timeout. + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + // A collection of system notification IDs to notify when the run begins or + // completes. The default behavior is to not send any system notifications. + // Task webhooks respect the task notification settings. + WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of a run. * `JOB_RUN`: Normal job run. A run created with +// :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with +// [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with +// :method:jobs/submit. +// +// [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow +type RunType string + +// Normal job run. A run created with :method:jobs/runNow. +const RunTypeJobRun RunType = `JOB_RUN` + +// Submit run. A run created with :method:jobs/submit. +const RunTypeSubmitRun RunType = `SUBMIT_RUN` + +// Workflow run. A run created with [dbutils.notebook.run]. 
+//
+// [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow
+const RunTypeWorkflowRun RunType = `WORKFLOW_RUN`
+
+// String representation for [fmt.Print]
+func (f *RunType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RunType) Set(v string) error {
+ switch v {
+ case `JOB_RUN`, `SUBMIT_RUN`, `WORKFLOW_RUN`:
+ *f = RunType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "JOB_RUN", "SUBMIT_RUN", "WORKFLOW_RUN"`, v)
+ }
+}
+
+// Type always returns RunType to satisfy [pflag.Value] interface
+func (f *RunType) Type() string {
+ return "RunType"
+}
+
+// Determines the cluster's runtime engine, either standard or Photon.
+//
+// This field is not compatible with legacy `spark_version` values that contain
+// `-photon-`. Remove `-photon-` from the `spark_version` and set
+// `runtime_engine` to `PHOTON`.
+//
+// If left unspecified, the runtime engine defaults to standard unless the
+// spark_version contains -photon-, in which case Photon is used.
+type RuntimeEngine string
+
+const RuntimeEngineNull RuntimeEngine = `NULL`
+
+const RuntimeEnginePhoton RuntimeEngine = `PHOTON`
+
+const RuntimeEngineStandard RuntimeEngine = `STANDARD`
+
+// String representation for [fmt.Print]
+func (f *RuntimeEngine) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RuntimeEngine) Set(v string) error {
+ switch v {
+ case `NULL`, `PHOTON`, `STANDARD`:
+ *f = RuntimeEngine(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "NULL", "PHOTON", "STANDARD"`, v)
+ }
+}
+
+// Type always returns RuntimeEngine to satisfy [pflag.Value] interface
+func (f *RuntimeEngine) Type() string {
+ return "RuntimeEngine"
+}
+
+type S3StorageInfo struct {
+ // (Optional) Set canned access control list for the logs, e.g.
+ // `bucket-owner-full-control`. If `canned_acl` is set, make sure the
+ // cluster IAM role has `s3:PutObjectAcl` permission on the destination
+ // bucket and prefix. The full list of possible canned ACLs can be found at
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.
+ // Note that by default only the object owner gets full control. If you are
+ // using a cross-account role for writing data, you may want to set
+ // `bucket-owner-full-control` so that the bucket owner can read the logs.
+ CannedAcl string `json:"canned_acl,omitempty"`
+ // S3 destination, e.g. `s3://my-bucket/some-prefix`. Note that logs are
+ // delivered using the cluster IAM role; make sure the cluster IAM role is
+ // set and that it has write access to the destination. Also note that you
+ // cannot use AWS keys to deliver logs.
+ Destination string `json:"destination"`
+ // (Optional) Flag to enable server side encryption, `false` by default.
+ EnableEncryption bool `json:"enable_encryption,omitempty"`
+ // (Optional) The encryption type, either `sse-s3` or `sse-kms`. It is used
+ // only when encryption is enabled; the default type is `sse-s3`.
+ EncryptionType string `json:"encryption_type,omitempty"`
+ // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or
+ // endpoint needs to be set. If both are set, endpoint is used.
+ Endpoint string `json:"endpoint,omitempty"`
+ // (Optional) KMS key used if encryption is enabled and the encryption type
+ // is set to `sse-kms`.
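+ //
+ // For example, a hedged sketch of log delivery with SSE-KMS enabled (the
+ // bucket, region, and key ARN are placeholders; all fields are defined on
+ // this struct):
+ //
+ //	logConf := S3StorageInfo{
+ //		Destination:      "s3://my-bucket/cluster-logs",
+ //		Region:           "us-west-2",
+ //		EnableEncryption: true,
+ //		EncryptionType:   "sse-kms",
+ //		KmsKey:           "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE",
+ //	}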
+ KmsKey string `json:"kms_key,omitempty"`
+ // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set.
+ // If both are set, endpoint is used.
+ Region string `json:"region,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *S3StorageInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s S3StorageInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file
+// will be retrieved from the local Databricks workspace. When set to `GIT`,
+// the SQL file will be retrieved from a Git repository defined in `git_source`.
+// If the value is empty, the task will use `GIT` if `git_source` is defined and
+// `WORKSPACE` otherwise.
+//
+// * `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL file
+// is located in cloud Git provider.
+type Source string
+
+// SQL file is located in cloud Git provider.
+const SourceGit Source = `GIT`
+
+// SQL file is located in Databricks workspace.
+const SourceWorkspace Source = `WORKSPACE`
+
+// String representation for [fmt.Print]
+func (f *Source) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Source) Set(v string) error {
+ switch v {
+ case `GIT`, `WORKSPACE`:
+ *f = Source(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "GIT", "WORKSPACE"`, v)
+ }
+}
+
+// Type always returns Source to satisfy [pflag.Value] interface
+func (f *Source) Type() string {
+ return "Source"
+}
+
+type SparkJarTask struct {
+ // Deprecated since 04/2016. Provide a `jar` through the `libraries` field
+ // instead. For an example, see :method:jobs/create.
+ JarUri string `json:"jar_uri,omitempty"`
+ // The full name of the class containing the main method to be executed.
+ // This class must be contained in a JAR provided as a library.
+ //
+ // The code must use `SparkContext.getOrCreate` to obtain a Spark context;
+ // otherwise, runs of the job fail.
+ MainClassName string `json:"main_class_name,omitempty"`
+ // Parameters passed to the main method.
+ //
+ // Use [Task parameter variables] to set parameters containing information
+ // about job runs.
+ //
+ // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
+ Parameters []string `json:"parameters,omitempty"`
+ // Deprecated. A value of `false` is no longer supported.
+ RunAsRepl bool `json:"run_as_repl,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SparkJarTask) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s SparkJarTask) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type SparkPythonTask struct {
+ // Command line parameters passed to the Python file.
+ //
+ // Use [Task parameter variables] to set parameters containing information
+ // about job runs.
+ //
+ // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables
+ Parameters []string `json:"parameters,omitempty"`
+ // The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/,
+ // adls:/, gcs:/) and workspace paths are supported. For Python files stored
+ // in the Databricks workspace, the path must be absolute and begin with
+ // `/`. For files stored in a remote repository, the path must be relative.
+ // This field is required.
+ PythonFile string `json:"python_file"`
+ // Optional location type of the Python file.
When set to `WORKSPACE` or not + // specified, the file will be retrieved from the local Databricks workspace + // or cloud location (if the `python_file` has a URI format). When set to + // `GIT`, the Python file will be retrieved from a Git repository defined in + // `git_source`. + // + // * `WORKSPACE`: The Python file is located in a Databricks workspace or at + // a cloud filesystem URI. * `GIT`: The Python file is located in a remote + // Git repository. + Source Source `json:"source,omitempty"` +} + +type SparkSubmitTask struct { + // Command-line parameters passed to spark submit. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + Parameters []string `json:"parameters,omitempty"` +} + +type SqlAlertOutput struct { + // The state of the SQL alert. + // + // * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not + // fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled + // trigger conditions + AlertState SqlAlertState `json:"alert_state,omitempty"` + // The link to find the output results. + OutputLink string `json:"output_link,omitempty"` + // The text of the SQL query. Can Run permission of the SQL query associated + // with the SQL alert is required to view this field. + QueryText string `json:"query_text,omitempty"` + // Information about SQL statements executed in the run. + SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"` + // The canonical identifier of the SQL warehouse. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlAlertOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlAlertOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The state of the SQL alert. +// +// * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not +// fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled trigger +// conditions +type SqlAlertState string + +const SqlAlertStateOk SqlAlertState = `OK` + +const SqlAlertStateTriggered SqlAlertState = `TRIGGERED` + +const SqlAlertStateUnknown SqlAlertState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *SqlAlertState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SqlAlertState) Set(v string) error { + switch v { + case `OK`, `TRIGGERED`, `UNKNOWN`: + *f = SqlAlertState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OK", "TRIGGERED", "UNKNOWN"`, v) + } +} + +// Type always returns SqlAlertState to satisfy [pflag.Value] interface +func (f *SqlAlertState) Type() string { + return "SqlAlertState" +} + +type SqlDashboardOutput struct { + // The canonical identifier of the SQL warehouse. + WarehouseId string `json:"warehouse_id,omitempty"` + // Widgets executed in the run. Only SQL query based widgets are listed. + Widgets []SqlDashboardWidgetOutput `json:"widgets,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlDashboardOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlDashboardOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlDashboardWidgetOutput struct { + // Time (in epoch milliseconds) when execution of the SQL widget ends. 
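+ //
+ // Epoch-millisecond fields like this one convert cleanly with the Go
+ // standard library; for a value `w` of this type (a sketch, requires Go
+ // 1.17+ for time.UnixMilli):
+ //
+ //	elapsed := time.UnixMilli(w.EndTime).Sub(time.UnixMilli(w.StartTime))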
+ EndTime int64 `json:"end_time,omitempty"` + // The information about the error when execution fails. + Error *SqlOutputError `json:"error,omitempty"` + // The link to find the output results. + OutputLink string `json:"output_link,omitempty"` + // Time (in epoch milliseconds) when execution of the SQL widget starts. + StartTime int64 `json:"start_time,omitempty"` + // The execution status of the SQL widget. + Status SqlDashboardWidgetOutputStatus `json:"status,omitempty"` + // The canonical identifier of the SQL widget. + WidgetId string `json:"widget_id,omitempty"` + // The title of the SQL widget. + WidgetTitle string `json:"widget_title,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlDashboardWidgetOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlDashboardWidgetOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlDashboardWidgetOutputStatus string + +const SqlDashboardWidgetOutputStatusCancelled SqlDashboardWidgetOutputStatus = `CANCELLED` + +const SqlDashboardWidgetOutputStatusFailed SqlDashboardWidgetOutputStatus = `FAILED` + +const SqlDashboardWidgetOutputStatusPending SqlDashboardWidgetOutputStatus = `PENDING` + +const SqlDashboardWidgetOutputStatusRunning SqlDashboardWidgetOutputStatus = `RUNNING` + +const SqlDashboardWidgetOutputStatusSuccess SqlDashboardWidgetOutputStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *SqlDashboardWidgetOutputStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SqlDashboardWidgetOutputStatus) Set(v string) error { + switch v { + case `CANCELLED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCESS`: + *f = SqlDashboardWidgetOutputStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELLED", "FAILED", "PENDING", "RUNNING", "SUCCESS"`, v) + } +} + +// Type always returns SqlDashboardWidgetOutputStatus to satisfy [pflag.Value] interface +func (f *SqlDashboardWidgetOutputStatus) Type() string { + return "SqlDashboardWidgetOutputStatus" +} + +type SqlOutput struct { + // The output of a SQL alert task, if available. + AlertOutput *SqlAlertOutput `json:"alert_output,omitempty"` + // The output of a SQL dashboard task, if available. + DashboardOutput *SqlDashboardOutput `json:"dashboard_output,omitempty"` + // The output of a SQL query task, if available. + QueryOutput *SqlQueryOutput `json:"query_output,omitempty"` +} + +type SqlOutputError struct { + // The error message when execution fails. + Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlOutputError) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlOutputError) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlQueryOutput struct { + EndpointId string `json:"endpoint_id,omitempty"` + // The link to find the output results. + OutputLink string `json:"output_link,omitempty"` + // The text of the SQL query. Can Run permission of the SQL query is + // required to view this field. + QueryText string `json:"query_text,omitempty"` + // Information about SQL statements executed in the run. + SqlStatements []SqlStatementOutput `json:"sql_statements,omitempty"` + // The canonical identifier of the SQL warehouse. 
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlQueryOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlQueryOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlStatementOutput struct { + // A key that can be used to look up query details. + LookupKey string `json:"lookup_key,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlStatementOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlStatementOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlTask struct { + // If alert, indicates that this job must refresh a SQL alert. + Alert *SqlTaskAlert `json:"alert,omitempty"` + // If dashboard, indicates that this job must refresh a SQL dashboard. + Dashboard *SqlTaskDashboard `json:"dashboard,omitempty"` + // If file, indicates that this job runs a SQL file in a remote Git + // repository. + File *SqlTaskFile `json:"file,omitempty"` + // Parameters to be used for each run of this job. The SQL alert task does + // not support custom parameters. + Parameters map[string]string `json:"parameters,omitempty"` + // If query, indicates that this job must execute a SQL query. + Query *SqlTaskQuery `json:"query,omitempty"` + // The canonical identifier of the SQL warehouse. Recommended to use with + // serverless or pro SQL warehouses. Classic SQL warehouses are only + // supported for SQL alert, dashboard and query tasks and are limited to + // scheduled single-task jobs. + WarehouseId string `json:"warehouse_id"` +} + +type SqlTaskAlert struct { + // The canonical identifier of the SQL alert. + AlertId string `json:"alert_id"` + // If true, the alert notifications are not sent to subscribers. + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + // If specified, alert notifications are sent to subscribers. + Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlTaskAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlTaskAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlTaskDashboard struct { + // Subject of the email sent to subscribers of this task. + CustomSubject string `json:"custom_subject,omitempty"` + // The canonical identifier of the SQL dashboard. + DashboardId string `json:"dashboard_id"` + // If true, the dashboard snapshot is not taken, and emails are not sent to + // subscribers. + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + // If specified, dashboard snapshots are sent to subscriptions. + Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SqlTaskDashboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SqlTaskDashboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SqlTaskFile struct { + // Path of the SQL file. Must be relative if the source is a remote Git + // repository and absolute for workspace paths. + Path string `json:"path"` + // Optional location type of the SQL file. When set to `WORKSPACE`, the SQL + // file will be retrieved from the local Databricks workspace. When set to + // `GIT`, the SQL file will be retrieved from a Git repository defined in + // `git_source`. 
If the value is empty, the task will use `GIT` if `git_source` is defined
+ // and `WORKSPACE` otherwise.
+ //
+ // * `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL
+ // file is located in cloud Git provider.
+ Source Source `json:"source,omitempty"`
+}
+
+type SqlTaskQuery struct {
+ // The canonical identifier of the SQL query.
+ QueryId string `json:"query_id"`
+}
+
+type SqlTaskSubscription struct {
+ // The canonical identifier of the destination to receive email
+ // notification. This parameter is mutually exclusive with user_name. You
+ // cannot set both destination_id and user_name for subscription
+ // notifications.
+ DestinationId string `json:"destination_id,omitempty"`
+ // The user name to receive the subscription email. This parameter is
+ // mutually exclusive with destination_id. You cannot set both
+ // destination_id and user_name for subscription notifications.
+ UserName string `json:"user_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *SqlTaskSubscription) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s SqlTaskSubscription) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type SubmitRun struct {
+ // List of permissions to set on the job.
+ AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"`
+ // The user-specified ID of the budget policy to use for this one-time run.
+ // If not specified, the run will not be attributed to any budget policy.
+ BudgetPolicyId string `json:"budget_policy_id,omitempty"`
+ // An optional set of email addresses notified when the run begins or
+ // completes.
+ EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"`
+ // A list of task execution environment specifications that can be
+ // referenced by tasks of this run.
+ Environments []JobEnvironment `json:"environments,omitempty"`
+ // An optional specification for a remote Git repository containing the
+ // source code used by tasks. Version-controlled source code is supported by
+ // notebook, dbt, Python script, and SQL File tasks.
+ //
+ // If `git_source` is set, these tasks retrieve the file from the remote
+ // repository by default. However, this behavior can be overridden by
+ // setting `source` to `WORKSPACE` on the task.
+ //
+ // Note: dbt and SQL File tasks support only version-controlled sources. If
+ // dbt or SQL File tasks are used, `git_source` must be defined on the job.
+ GitSource *GitSource `json:"git_source,omitempty"`
+ // An optional set of health rules that can be defined for this job.
+ Health *JobsHealthRules `json:"health,omitempty"`
+ // An optional token that can be used to guarantee the idempotency of job
+ // run requests. If a run with the provided token already exists, the
+ // request does not create a new run but returns the ID of the existing run
+ // instead. If a run with the provided token is deleted, an error is
+ // returned.
+ //
+ // If you specify the idempotency token, upon failure you can retry until
+ // the request succeeds. Databricks guarantees that exactly one run is
+ // launched with that idempotency token.
+ //
+ // This token must have at most 64 characters.
+ //
+ // For more information, see [How to ensure idempotency for jobs].
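+ //
+ // A minimal sketch (the run name and token value are illustrative):
+ //
+ //	req := SubmitRun{
+ //		RunName:          "nightly-export",
+ //		IdempotencyToken: "nightly-export-2025-02-11",
+ //	}
+ //	// Re-submitting req after a transient failure cannot launch a second
+ //	// run; the service returns the existing run's ID instead.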
+ // + // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html + IdempotencyToken string `json:"idempotency_token,omitempty"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // run. + NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` + // The queue settings of the one-time run. + Queue *QueueSettings `json:"queue,omitempty"` + // Specifies the user or service principal that the job runs as. If not + // specified, the job runs as the user who submits the request. + RunAs *JobRunAs `json:"run_as,omitempty"` + // An optional name for the run. The default value is `Untitled`. + RunName string `json:"run_name,omitempty"` + + Tasks []SubmitTask `json:"tasks,omitempty"` + // An optional timeout applied to each run of this job. A value of `0` means + // no timeout. + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + // A collection of system notification IDs to notify when the run begins or + // completes. + WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SubmitRun) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SubmitRun) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Run was created and started successfully. +type SubmitRunResponse struct { + // The canonical identifier for the newly submitted run. + RunId int64 `json:"run_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SubmitRunResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SubmitRunResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SubmitTask struct { + // The task runs a [clean rooms] notebook when the + // `clean_rooms_notebook_task` field is present. + // + // [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + CleanRoomsNotebookTask *CleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. + ConditionTask *ConditionTask `json:"condition_task,omitempty"` + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. + DbtTask *DbtTask `json:"dbt_task,omitempty"` + // An optional array of objects specifying the dependency graph of the task. + // All tasks specified in this field must complete successfully before + // executing this task. The key is `task_key`, and the value is the name + // assigned to the dependent task. + DependsOn []TaskDependency `json:"depends_on,omitempty"` + // An optional description for this task. + Description string `json:"description,omitempty"` + // An optional set of email addresses notified when the task run begins or + // completes. The default behavior is to not send any emails. + EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. 
+ EnvironmentKey string `json:"environment_key,omitempty"`
+ // If existing_cluster_id, the ID of an existing cluster that is used for
+ // all runs. When running jobs or tasks on an existing cluster, you may need
+ // to manually restart the cluster if it stops responding. We suggest
+ // running jobs and tasks on new clusters for greater reliability.
+ ExistingClusterId string `json:"existing_cluster_id,omitempty"`
+ // The task executes a nested task for every input provided when the
+ // `for_each_task` field is present.
+ ForEachTask *ForEachTask `json:"for_each_task,omitempty"`
+ // An optional set of health rules that can be defined for this job.
+ Health *JobsHealthRules `json:"health,omitempty"`
+ // An optional list of libraries to be installed on the cluster. The default
+ // value is an empty list.
+ Libraries []Library `json:"libraries,omitempty"`
+ // If new_cluster, a description of a new cluster that is created for each
+ // run.
+ NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"`
+ // The task runs a notebook when the `notebook_task` field is present.
+ NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
+ // Optional notification settings that are used when sending notifications
+ // to each of the `email_notifications` and `webhook_notifications` for this
+ // task run.
+ NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"`
+ // The task triggers a pipeline update when the `pipeline_task` field is
+ // present. Only pipelines configured to use triggered mode are supported.
+ PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+ // The task runs a Python wheel when the `python_wheel_task` field is
+ // present.
+ PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
+ // An optional value indicating the condition that determines whether the
+ // task should be run once its dependencies have been completed. When
+ // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of
+ // possible values.
+ RunIf RunIf `json:"run_if,omitempty"`
+ // The task triggers another job when the `run_job_task` field is present.
+ RunJobTask *RunJobTask `json:"run_job_task,omitempty"`
+ // The task runs a JAR when the `spark_jar_task` field is present.
+ SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
+ // The task runs a Python file when the `spark_python_task` field is
+ // present.
+ SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
+ // (Legacy) The task runs the spark-submit script when the
+ // `spark_submit_task` field is present. This task can run only on new
+ // clusters and is not compatible with serverless compute.
+ //
+ // In the `new_cluster` specification, `libraries` and `spark_conf` are not
+ // supported. Instead, use `--jars` and `--py-files` to add Java and Python
+ // libraries and `--conf` to set the Spark configurations.
+ //
+ // `master`, `deploy-mode`, and `executor-cores` are automatically
+ // configured by Databricks; you _cannot_ specify them in parameters.
+ //
+ // By default, the Spark submit job uses all available memory (excluding
+ // reserved memory for Databricks services). You can set `--driver-memory`
+ // and `--executor-memory` to a smaller value to leave some room for
+ // off-heap usage.
+ //
+ // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3
+ // paths.
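+ //
+ // Illustrative sketch of the parameter style described above, for a
+ // SubmitTask value `task` (the class name, JAR path, and memory size are
+ // placeholders):
+ //
+ //	task.SparkSubmitTask = &SparkSubmitTask{
+ //		Parameters: []string{
+ //			"--class", "org.apache.spark.examples.SparkPi",
+ //			"--executor-memory", "4g",
+ //			"dbfs:/FileStore/jars/spark-examples.jar", "10",
+ //		},
+ //	}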
+ SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"` + // The task runs a SQL query or file, or it refreshes a SQL alert or a + // legacy SQL dashboard when the `sql_task` field is present. + SqlTask *SqlTask `json:"sql_task,omitempty"` + // A unique name for the task. This field is used to refer to this task from + // other tasks. This field is required and must be unique within its parent + // job. On Update or Reset, this field is used to reference the tasks to be + // updated or reset. + TaskKey string `json:"task_key"` + // An optional timeout applied to each run of this job task. A value of `0` + // means no timeout. + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + // A collection of system notification IDs to notify when the run begins or + // completes. The default behavior is to not send any system notifications. + // Task webhooks respect the task notification settings. + WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SubmitTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SubmitTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TableUpdateTriggerConfiguration struct { + // The table(s) condition based on which to trigger a job run. + Condition Condition `json:"condition,omitempty"` + // If set, the trigger starts a run only after the specified amount of time + // has passed since the last time the trigger fired. The minimum allowed + // value is 60 seconds. + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + // A list of Delta tables to monitor for changes. The table name must be in + // the format `catalog_name.schema_name.table_name`. + TableNames []string `json:"table_names,omitempty"` + // If set, the trigger starts a run only after no table updates have + // occurred for the specified time and can be used to wait for a series of + // table updates before triggering a run. The minimum allowed value is 60 + // seconds. + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableUpdateTriggerConfiguration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableUpdateTriggerConfiguration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Task struct { + // The task runs a [clean rooms] notebook when the + // `clean_rooms_notebook_task` field is present. + // + // [clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + CleanRoomsNotebookTask *CleanRoomsNotebookTask `json:"clean_rooms_notebook_task,omitempty"` + // The task evaluates a condition that can be used to control the execution + // of other tasks when the `condition_task` field is present. The condition + // task does not require a cluster to execute and does not support retries + // or notifications. + ConditionTask *ConditionTask `json:"condition_task,omitempty"` + // The task runs one or more dbt commands when the `dbt_task` field is + // present. The dbt task requires both Databricks SQL and the ability to use + // a serverless or a pro SQL warehouse. + DbtTask *DbtTask `json:"dbt_task,omitempty"` + // An optional array of objects specifying the dependency graph of the task. + // All tasks specified in this field must complete before executing this + // task. The task will run only if the `run_if` condition is true. 
The key is `task_key`, and the value is the name assigned to the
+ // dependent task.
+ DependsOn []TaskDependency `json:"depends_on,omitempty"`
+ // An optional description for this task.
+ Description string `json:"description,omitempty"`
+ // An option to disable auto optimization in serverless.
+ DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"`
+ // An optional set of email addresses that is notified when runs of this
+ // task begin or complete, as well as when this task is deleted. The default
+ // behavior is to not send any emails.
+ EmailNotifications *TaskEmailNotifications `json:"email_notifications,omitempty"`
+ // The key that references an environment spec in a job. This field is
+ // required for Python script, Python wheel, and dbt tasks when using
+ // serverless compute.
+ EnvironmentKey string `json:"environment_key,omitempty"`
+ // If existing_cluster_id, the ID of an existing cluster that is used for
+ // all runs. When running jobs or tasks on an existing cluster, you may need
+ // to manually restart the cluster if it stops responding. We suggest
+ // running jobs and tasks on new clusters for greater reliability.
+ ExistingClusterId string `json:"existing_cluster_id,omitempty"`
+ // The task executes a nested task for every input provided when the
+ // `for_each_task` field is present.
+ ForEachTask *ForEachTask `json:"for_each_task,omitempty"`
+ // An optional set of health rules that can be defined for this job.
+ Health *JobsHealthRules `json:"health,omitempty"`
+ // If job_cluster_key, this task is executed reusing the cluster specified
+ // in `job.settings.job_clusters`.
+ JobClusterKey string `json:"job_cluster_key,omitempty"`
+ // An optional list of libraries to be installed on the cluster. The default
+ // value is an empty list.
+ Libraries []Library `json:"libraries,omitempty"`
+ // An optional maximum number of times to retry an unsuccessful run. A run
+ // is considered to be unsuccessful if it completes with the `FAILED`
+ // result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means
+ // to retry indefinitely and the value `0` means to never retry.
+ MaxRetries int `json:"max_retries,omitempty"`
+ // An optional minimal interval in milliseconds between the start of the
+ // failed run and the subsequent retry run. The default behavior is that
+ // unsuccessful runs are immediately retried.
+ MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
+ // If new_cluster, a description of a new cluster that is created for each
+ // run.
+ NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"`
+ // The task runs a notebook when the `notebook_task` field is present.
+ NotebookTask *NotebookTask `json:"notebook_task,omitempty"`
+ // Optional notification settings that are used when sending notifications
+ // to each of the `email_notifications` and `webhook_notifications` for this
+ // task.
+ NotificationSettings *TaskNotificationSettings `json:"notification_settings,omitempty"`
+ // The task triggers a pipeline update when the `pipeline_task` field is
+ // present. Only pipelines configured to use triggered mode are supported.
+ PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+ // The task runs a Python wheel when the `python_wheel_task` field is
+ // present.
+ PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
+ // An optional policy to specify whether to retry a job when it times out.
+ // The default behavior is to not retry on timeout.
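+ //
+ // For example, a sketch of a bounded retry policy on a task (all values are
+ // illustrative; the fields are defined on this struct):
+ //
+ //	t := Task{
+ //		TaskKey:                "ingest",
+ //		MaxRetries:             3,
+ //		MinRetryIntervalMillis: 60000,
+ //		TimeoutSeconds:         3600,
+ //		RetryOnTimeout:         true,
+ //	}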
+ RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
+ // An optional value specifying the condition determining whether the task
+ // is run once its dependencies have been completed.
+ //
+ // * `ALL_SUCCESS`: All dependencies have executed and succeeded *
+ // `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded *
+ // `NONE_FAILED`: None of the dependencies have failed and at least one was
+ // executed * `ALL_DONE`: All dependencies have been completed *
+ // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All
+ // dependencies have failed
+ RunIf RunIf `json:"run_if,omitempty"`
+ // The task triggers another job when the `run_job_task` field is present.
+ RunJobTask *RunJobTask `json:"run_job_task,omitempty"`
+ // The task runs a JAR when the `spark_jar_task` field is present.
+ SparkJarTask *SparkJarTask `json:"spark_jar_task,omitempty"`
+ // The task runs a Python file when the `spark_python_task` field is
+ // present.
+ SparkPythonTask *SparkPythonTask `json:"spark_python_task,omitempty"`
+ // (Legacy) The task runs the spark-submit script when the
+ // `spark_submit_task` field is present. This task can run only on new
+ // clusters and is not compatible with serverless compute.
+ //
+ // In the `new_cluster` specification, `libraries` and `spark_conf` are not
+ // supported. Instead, use `--jars` and `--py-files` to add Java and Python
+ // libraries and `--conf` to set the Spark configurations.
+ //
+ // `master`, `deploy-mode`, and `executor-cores` are automatically
+ // configured by Databricks; you _cannot_ specify them in parameters.
+ //
+ // By default, the Spark submit job uses all available memory (excluding
+ // reserved memory for Databricks services). You can set `--driver-memory`
+ // and `--executor-memory` to a smaller value to leave some room for
+ // off-heap usage.
+ //
+ // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3
+ // paths.
+ SparkSubmitTask *SparkSubmitTask `json:"spark_submit_task,omitempty"`
+ // The task runs a SQL query or file, or it refreshes a SQL alert or a
+ // legacy SQL dashboard when the `sql_task` field is present.
+ SqlTask *SqlTask `json:"sql_task,omitempty"`
+ // A unique name for the task. This field is used to refer to this task from
+ // other tasks. This field is required and must be unique within its parent
+ // job. On Update or Reset, this field is used to reference the tasks to be
+ // updated or reset.
+ TaskKey string `json:"task_key"`
+ // An optional timeout applied to each run of this job task. A value of `0`
+ // means no timeout.
+ TimeoutSeconds int `json:"timeout_seconds,omitempty"`
+ // A collection of system notification IDs to notify when runs of this task
+ // begin or complete. The default behavior is to not send any system
+ // notifications.
+ WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Task) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Task) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type TaskDependency struct {
+ // Can only be specified on condition task dependencies. The outcome of the
+ // dependent task that must be met for this task to run.
+ Outcome string `json:"outcome,omitempty"`
+ // The name of the task this task depends on.
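+ //
+ // For example, gating a downstream task on a condition task's boolean
+ // outcome (the task key is illustrative):
+ //
+ //	dep := TaskDependency{
+ //		TaskKey: "is_weekday",
+ //		Outcome: "true", // run only when the condition task evaluates to true
+ //	}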
+ TaskKey string `json:"task_key"` + + ForceSendFields []string `json:"-"` +} + +func (s *TaskDependency) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TaskDependency) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TaskEmailNotifications struct { + // If true, do not send email to recipients specified in `on_failure` if the + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + // A list of email addresses to be notified when the duration of a run + // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in + // the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is + // specified in the `health` field for the job, notifications are not sent. + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + // A list of email addresses to be notified when a run unsuccessfully + // completes. A run is considered to have completed unsuccessfully if it + // ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED` or + // `TIMED_OUT` result_state. If this is not specified on job creation, + // reset, or update, the list is empty, and notifications are not sent. + OnFailure []string `json:"on_failure,omitempty"` + // A list of email addresses to be notified when a run begins. If not + // specified on job creation, reset, or update, the list is empty, and + // notifications are not sent. + OnStart []string `json:"on_start,omitempty"` + // A list of email addresses to notify when any streaming backlog thresholds + // are exceeded for any stream. Streaming backlog thresholds can be set in + // the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` + // A list of email addresses to be notified when a run successfully + // completes. A run is considered to have completed successfully if it ends + // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If + // not specified on job creation, reset, or update, the list is empty, and + // notifications are not sent. + OnSuccess []string `json:"on_success,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TaskEmailNotifications) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TaskEmailNotifications) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TaskNotificationSettings struct { + // If true, do not send notifications to recipients specified in `on_start` + // for the retried runs and do not send notifications to recipients + // specified in `on_failure` until the last retry of the run. + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + // If true, do not send notifications to recipients specified in + // `on_failure` if the run is canceled. + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + // If true, do not send notifications to recipients specified in + // `on_failure` if the run is skipped. 
+ NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TaskNotificationSettings) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TaskNotificationSettings) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The code indicates why the run was terminated. Additional codes might be +// introduced in future releases. * `SUCCESS`: The run was completed +// successfully. * `USER_CANCELED`: The run was successfully canceled during +// execution by a user. * `CANCELED`: The run was canceled during execution by +// the Databricks platform; for example, if the maximum run duration was +// exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream +// task run failed, the dependency type condition was not met, or there were no +// material tasks to execute. * `INTERNAL_ERROR`: The run encountered an +// unexpected error. Refer to the state message for further details. * +// `DRIVER_ERROR`: The run encountered an error while communicating with the +// Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer +// to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: +// Failed to complete the checkout due to an error when communicating with the +// third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it +// issued an invalid request to start the cluster. * +// `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the +// maximum number of concurrent active runs. Consider scheduling the runs over a +// larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to +// access a feature unavailable for the workspace. * +// `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and +// upsize requests have exceeded the allotted rate limit. Consider spreading the +// run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run +// failed due to an error when accessing the customer blob storage. Refer to the +// state message for further details. * `RUN_EXECUTION_ERROR`: The run was +// completed with task failures. For more details, refer to the state message or +// run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue +// while accessing a resource. Refer to the state message for further details. * +// `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the +// user-requested library. Refer to the state message for further details. The +// causes might include, but are not limited to: The provided library is +// invalid, there are insufficient permissions to install the library, and so +// forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit +// of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: +// The run is scheduled on a cluster that has already reached the maximum number +// of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: +// A resource necessary for run execution does not exist. Refer to the state +// message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed +// due to an invalid configuration. Refer to the state message for further +// details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. +// Refer to the state message for further details. * +// `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job +// level queue size limit. 
+// +// [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now +type TerminationCodeCode string + +const TerminationCodeCodeBudgetPolicyLimitExceeded TerminationCodeCode = `BUDGET_POLICY_LIMIT_EXCEEDED` + +// The run was canceled during execution by the platform; for +// example, if the maximum run duration was exceeded. +const TerminationCodeCodeCanceled TerminationCodeCode = `CANCELED` + +// The run failed due to a cloud provider issue. Refer to the state message for +// further details. +const TerminationCodeCodeCloudFailure TerminationCodeCode = `CLOUD_FAILURE` + +// The run failed due to a cluster error. Refer to the state message for further +// details. +const TerminationCodeCodeClusterError TerminationCodeCode = `CLUSTER_ERROR` + +// The number of cluster creation, start, and upsize requests have exceeded the +// allotted rate limit. Consider spreading the run execution over a larger time +// frame. +const TerminationCodeCodeClusterRequestLimitExceeded TerminationCodeCode = `CLUSTER_REQUEST_LIMIT_EXCEEDED` + +// The run encountered an error while communicating with the Spark Driver. +const TerminationCodeCodeDriverError TerminationCodeCode = `DRIVER_ERROR` + +// The run failed because it tried to access a feature unavailable for the +// workspace. +const TerminationCodeCodeFeatureDisabled TerminationCodeCode = `FEATURE_DISABLED` + +// The run encountered an unexpected error. Refer to the state message for +// further details. +const TerminationCodeCodeInternalError TerminationCodeCode = `INTERNAL_ERROR` + +// The run failed because it issued an invalid request to start the cluster. +const TerminationCodeCodeInvalidClusterRequest TerminationCodeCode = `INVALID_CLUSTER_REQUEST` + +// The run failed due to an invalid configuration. Refer to the state message +// for further details. +const TerminationCodeCodeInvalidRunConfiguration TerminationCodeCode = `INVALID_RUN_CONFIGURATION` + +// The run failed while installing the user-requested library. Refer to the +// state message for further details. The causes might include, but are not +// limited to: The provided library is invalid, there are insufficient +// permissions to install the library, and so forth. +const TerminationCodeCodeLibraryInstallationError TerminationCodeCode = `LIBRARY_INSTALLATION_ERROR` + +// The scheduled run exceeds the limit of maximum concurrent runs set for the +// job. +const TerminationCodeCodeMaxConcurrentRunsExceeded TerminationCodeCode = `MAX_CONCURRENT_RUNS_EXCEEDED` + +// The run was skipped due to reaching the job level queue size limit. +const TerminationCodeCodeMaxJobQueueSizeExceeded TerminationCodeCode = `MAX_JOB_QUEUE_SIZE_EXCEEDED` + +// The run is scheduled on a cluster that has already reached the maximum number +// of contexts it is configured to create. See: [Link]. +// +// [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now +const TerminationCodeCodeMaxSparkContextsExceeded TerminationCodeCode = `MAX_SPARK_CONTEXTS_EXCEEDED` + +// Failed to complete the checkout due to an error when communicating with the +// third party service. +const TerminationCodeCodeRepositoryCheckoutFailed TerminationCodeCode = `REPOSITORY_CHECKOUT_FAILED` + +// A resource necessary for run execution does not exist. Refer to the state +// message for further details. +const TerminationCodeCodeResourceNotFound TerminationCodeCode = `RESOURCE_NOT_FOUND` + +// The run was completed with task failures. 
For more details, refer to the +// state message or run output. +const TerminationCodeCodeRunExecutionError TerminationCodeCode = `RUN_EXECUTION_ERROR` + +// Run was never executed, for example, if the upstream task run failed, the +// dependency type condition was not met, or there were no material tasks to +// execute. +const TerminationCodeCodeSkipped TerminationCodeCode = `SKIPPED` + +// The run failed due to an error when accessing the customer blob storage. +// Refer to the state message for further details. +const TerminationCodeCodeStorageAccessError TerminationCodeCode = `STORAGE_ACCESS_ERROR` + +// The run was completed successfully. +const TerminationCodeCodeSuccess TerminationCodeCode = `SUCCESS` + +// The run failed due to a permission issue while accessing a resource. Refer to +// the state message for further details. +const TerminationCodeCodeUnauthorizedError TerminationCodeCode = `UNAUTHORIZED_ERROR` + +// The run was successfully canceled during execution by a user. +const TerminationCodeCodeUserCanceled TerminationCodeCode = `USER_CANCELED` + +// The workspace has reached the quota for the maximum number of concurrent +// active runs. Consider scheduling the runs over a larger time frame. +const TerminationCodeCodeWorkspaceRunLimitExceeded TerminationCodeCode = `WORKSPACE_RUN_LIMIT_EXCEEDED` + +// String representation for [fmt.Print] +func (f *TerminationCodeCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationCodeCode) Set(v string) error { + switch v { + case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: + *f = TerminationCodeCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) + } +} + +// Type always returns TerminationCodeCode to satisfy [pflag.Value] interface +func (f *TerminationCodeCode) Type() string { + return "TerminationCodeCode" +} + +type TerminationDetails struct { + // The code indicates why the run was terminated. Additional codes might be + // introduced in future releases. * `SUCCESS`: The run was completed + // successfully. * `USER_CANCELED`: The run was successfully canceled during + // execution by a user. * `CANCELED`: The run was canceled during execution + // by the Databricks platform; for example, if the maximum run duration was + // exceeded. * `SKIPPED`: Run was never executed, for example, if the + // upstream task run failed, the dependency type condition was not met, or + // there were no material tasks to execute. 
* `INTERNAL_ERROR`: The run + // encountered an unexpected error. Refer to the state message for further + // details. * `DRIVER_ERROR`: The run encountered an error while + // communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed + // due to a cluster error. Refer to the state message for further details. * + // `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an + // error when communicating with the third party service. * + // `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid + // request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The + // workspace has reached the quota for the maximum number of concurrent + // active runs. Consider scheduling the runs over a larger time frame. * + // `FEATURE_DISABLED`: The run failed because it tried to access a feature + // unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The + // number of cluster creation, start, and upsize requests have exceeded the + // allotted rate limit. Consider spreading the run execution over a larger + // time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when + // accessing the customer blob storage. Refer to the state message for + // further details. * `RUN_EXECUTION_ERROR`: The run was completed with task + // failures. For more details, refer to the state message or run output. * + // `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while + // accessing a resource. Refer to the state message for further details. * + // `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the + // user-requested library. Refer to the state message for further details. + // The causes might include, but are not limited to: The provided library is + // invalid, there are insufficient permissions to install the library, and + // so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the + // limit of maximum concurrent runs set for the job. * + // `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has + // already reached the maximum number of contexts it is configured to + // create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run + // execution does not exist. Refer to the state message for further details. + // * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid + // configuration. Refer to the state message for further details. * + // `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to + // the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: + // The run was skipped due to reaching the job level queue size limit. + // + // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now + Code TerminationCodeCode `json:"code,omitempty"` + // A descriptive message with the termination details. This field is + // unstructured and the format might change. + Message string `json:"message,omitempty"` + // * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An + // error occurred in the Databricks platform. Please look at the [status + // page] or contact support if the issue persists. * `CLIENT_ERROR`: The run + // was terminated because of an error caused by user input or the job + // configuration. * `CLOUD_FAILURE`: The run was terminated because of an + // issue with your cloud provider. 
+ // + // [status page]: https://status.databricks.com/ + Type TerminationTypeType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TerminationDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TerminationDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// * `SUCCESS`: The run terminated without any issues * `INTERNAL_ERROR`: An +// error occurred in the Databricks platform. Please look at the [status page] +// or contact support if the issue persists. * `CLIENT_ERROR`: The run was +// terminated because of an error caused by user input or the job configuration. +// * `CLOUD_FAILURE`: The run was terminated because of an issue with your cloud +// provider. +// +// [status page]: https://status.databricks.com/ +type TerminationTypeType string + +// The run was terminated because of an error caused by user input or the job +// configuration. +const TerminationTypeTypeClientError TerminationTypeType = `CLIENT_ERROR` + +// The run was terminated because of an issue with your cloud provider. +const TerminationTypeTypeCloudFailure TerminationTypeType = `CLOUD_FAILURE` + +// An error occurred in the Databricks platform. Please look at the [status +// page] or contact support if the issue persists. +// +// [status page]: https://status.databricks.com/ +const TerminationTypeTypeInternalError TerminationTypeType = `INTERNAL_ERROR` + +// The run terminated without any issues +const TerminationTypeTypeSuccess TerminationTypeType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationTypeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationTypeType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `INTERNAL_ERROR`, `SUCCESS`: + *f = TerminationTypeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "INTERNAL_ERROR", "SUCCESS"`, v) + } +} + +// Type always returns TerminationTypeType to satisfy [pflag.Value] interface +func (f *TerminationTypeType) Type() string { + return "TerminationTypeType" +} + +// Additional details about what triggered the run +type TriggerInfo struct { + // The run id of the Run Job task run + RunId int64 `json:"run_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TriggerInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TriggerInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TriggerSettings struct { + // File arrival trigger settings. + FileArrival *FileArrivalTriggerConfiguration `json:"file_arrival,omitempty"` + // Whether this trigger is paused or not. + PauseStatus PauseStatus `json:"pause_status,omitempty"` + // Periodic trigger settings. + Periodic *PeriodicTriggerConfiguration `json:"periodic,omitempty"` + // Old table trigger settings name. Deprecated in favor of `table_update`. + Table *TableUpdateTriggerConfiguration `json:"table,omitempty"` + + TableUpdate *TableUpdateTriggerConfiguration `json:"table_update,omitempty"` +} + +// The type of trigger that fired this run. +// +// * `PERIODIC`: Schedules that periodically trigger runs, such as a cron +// scheduler. * `ONE_TIME`: One time triggers that fire a single run. This +// occurs when you trigger a single run on demand through the UI or the API. * +// `RETRY`: Indicates a run that is triggered as a retry of a previously failed +// run. 
This occurs when you request to re-run the job in case of failures. * +// `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * +// `FILE_ARRIVAL`: Indicates a run that is triggered by a file arrival. * +// `TABLE`: Indicates a run that is triggered by a table update. * +// `CONTINUOUS_RESTART`: Indicates a run created by a user to manually restart a +// continuous job run. +type TriggerType string + +// Indicates a run that is triggered by a file arrival. +const TriggerTypeFileArrival TriggerType = `FILE_ARRIVAL` + +// One time triggers that fire a single run. This occurs when you trigger a single +// run on demand through the UI or the API. +const TriggerTypeOneTime TriggerType = `ONE_TIME` + +// Schedules that periodically trigger runs, such as a cron scheduler. +const TriggerTypePeriodic TriggerType = `PERIODIC` + +// Indicates a run that is triggered as a retry of a previously failed run. This +// occurs when you request to re-run the job in case of failures. +const TriggerTypeRetry TriggerType = `RETRY` + +// Indicates a run that is triggered using a Run Job task. +const TriggerTypeRunJobTask TriggerType = `RUN_JOB_TASK` + +// Indicates a run that is triggered by a table update. +const TriggerTypeTable TriggerType = `TABLE` + +// String representation for [fmt.Print] +func (f *TriggerType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TriggerType) Set(v string) error { + switch v { + case `FILE_ARRIVAL`, `ONE_TIME`, `PERIODIC`, `RETRY`, `RUN_JOB_TASK`, `TABLE`: + *f = TriggerType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FILE_ARRIVAL", "ONE_TIME", "PERIODIC", "RETRY", "RUN_JOB_TASK", "TABLE"`, v) + } +} + +// Type always returns TriggerType to satisfy [pflag.Value] interface +func (f *TriggerType) Type() string { + return "TriggerType" +} + +type UpdateJob struct { + // Remove top-level fields in the job settings. Removing nested fields is + // not supported, except for tasks and job clusters (`tasks/task_1`). This + // field is optional. + FieldsToRemove []string `json:"fields_to_remove,omitempty"` + // The canonical identifier of the job to update. This field is required. + JobId int64 `json:"job_id"` + // The new settings for the job. + // + // Top-level fields specified in `new_settings` are completely replaced, + // except for arrays which are merged. That is, new and existing entries are + // completely replaced based on the respective key fields, i.e. `task_key` + // or `job_cluster_key`, while previous entries are kept. + // + // Partially updating nested fields is not supported. + // + // Changes to the field `JobSettings.timeout_seconds` are applied to active + // runs. Changes to other fields are applied to future runs only. + NewSettings *JobSettings `json:"new_settings,omitempty"` +} + +type UpdateResponse struct { +} + +type ViewItem struct { + // Content of the view. + Content string `json:"content,omitempty"` + // Name of the view item. In the case of code view, it would be the + // notebook’s name. In the case of dashboard view, it would be the + // dashboard’s name. + Name string `json:"name,omitempty"` + // Type of the view item. + Type ViewType `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ViewItem) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ViewItem) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// * `NOTEBOOK`: Notebook view item. 
* `DASHBOARD`: Dashboard view item. +type ViewType string + +// Dashboard view item. +const ViewTypeDashboard ViewType = `DASHBOARD` + +// Notebook view item. +const ViewTypeNotebook ViewType = `NOTEBOOK` + +// String representation for [fmt.Print] +func (f *ViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ViewType) Set(v string) error { + switch v { + case `DASHBOARD`, `NOTEBOOK`: + *f = ViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD", "NOTEBOOK"`, v) + } +} + +// Type always returns ViewType to satisfy [pflag.Value] interface +func (f *ViewType) Type() string { + return "ViewType" +} + +// * `CODE`: Code view of the notebook. * `DASHBOARDS`: All dashboard views of +// the notebook. * `ALL`: All views of the notebook. +type ViewsToExport string + +// All views of the notebook. +const ViewsToExportAll ViewsToExport = `ALL` + +// Code view of the notebook. +const ViewsToExportCode ViewsToExport = `CODE` + +// All dashboard views of the notebook. +const ViewsToExportDashboards ViewsToExport = `DASHBOARDS` + +// String representation for [fmt.Print] +func (f *ViewsToExport) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ViewsToExport) Set(v string) error { + switch v { + case `ALL`, `CODE`, `DASHBOARDS`: + *f = ViewsToExport(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL", "CODE", "DASHBOARDS"`, v) + } +} + +// Type always returns ViewsToExport to satisfy [pflag.Value] interface +func (f *ViewsToExport) Type() string { + return "ViewsToExport" +} + +type VolumesStorageInfo struct { + // Unity Catalog volumes file destination, e.g. + // `/Volumes/catalog/schema/volume/dir/file` + Destination string `json:"destination"` +} + +type Webhook struct { + Id string `json:"id"` +} + +type WebhookNotifications struct { + // An optional list of system notification IDs to call when the duration of + // a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` + // metric in the `health` field. A maximum of 3 destinations can be + // specified for the `on_duration_warning_threshold_exceeded` property. + OnDurationWarningThresholdExceeded []Webhook `json:"on_duration_warning_threshold_exceeded,omitempty"` + // An optional list of system notification IDs to call when the run fails. A + // maximum of 3 destinations can be specified for the `on_failure` property. + OnFailure []Webhook `json:"on_failure,omitempty"` + // An optional list of system notification IDs to call when the run starts. + // A maximum of 3 destinations can be specified for the `on_start` property. + OnStart []Webhook `json:"on_start,omitempty"` + // An optional list of system notification IDs to call when any streaming + // backlog thresholds are exceeded for any stream. Streaming backlog + // thresholds can be set in the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. A maximum of 3 destinations + // can be specified for the `on_streaming_backlog_exceeded` property. + OnStreamingBacklogExceeded []Webhook `json:"on_streaming_backlog_exceeded,omitempty"` + // An optional list of system notification IDs to call when the run + // completes successfully. 
A maximum of 3 destinations can be specified for + // the `on_success` property. + OnSuccess []Webhook `json:"on_success,omitempty"` +} + +type WorkloadType struct { + // Defines what type of clients can use the cluster, e.g. Notebooks, Jobs. + Clients ClientsTypes `json:"clients"` +} + +type WorkspaceStorageInfo struct { + // Workspace files destination, e.g. + // `/Users/user1@databricks.com/my-init.sh` + Destination string `json:"destination"` +} diff --git a/marketplace/v2preview/api.go b/marketplace/v2preview/api.go new file mode 100755 index 000000000..63ba34419 --- /dev/null +++ b/marketplace/v2preview/api.go @@ -0,0 +1,1528 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Consumer Fulfillments Preview, Consumer Installations Preview, Consumer Listings Preview, Consumer Personalization Requests Preview, Consumer Providers Preview, Provider Exchange Filters Preview, Provider Exchanges Preview, Provider Files Preview, Provider Listings Preview, Provider Personalization Requests Preview, Provider Provider Analytics Dashboards Preview, Provider Providers Preview, etc. +package marketplacepreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type ConsumerFulfillmentsPreviewInterface interface { + + // Get listing content metadata. + // + // Get a high level preview of the metadata of listing installable content. + // + // This method is generated by Databricks SDK Code Generator. + Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] + + // Get listing content metadata. + // + // Get a high level preview of the metadata of listing installable content. + // + // This method is generated by Databricks SDK Code Generator. + GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) + + // Get listing content metadata. + // + // Get a high level preview of the metadata of listing installable content. + GetByListingId(ctx context.Context, listingId string) (*GetListingContentMetadataResponse, error) + + // List all listing fulfillments. + // + // Get all listings fulfillments associated with a listing. A _fulfillment_ is a + // potential installation. Standard installations contain metadata about the + // attached share or git repo. Only one of these fields will be present. + // Personalized installations contain metadata about the attached share or git + // repo, as well as the Delta Sharing recipient type. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] + + // List all listing fulfillments. + // + // Get all listings fulfillments associated with a listing. A _fulfillment_ is a + // potential installation. Standard installations contain metadata about the + // attached share or git repo. Only one of these fields will be present. + // Personalized installations contain metadata about the attached share or git + // repo, as well as the Delta Sharing recipient type. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) + + // List all listing fulfillments. + // + // Get all listings fulfillments associated with a listing. 
A _fulfillment_ is a + // potential installation. Standard installations contain metadata about the + // attached share or git repo. Only one of these fields will be present. + // Personalized installations contain metadata about the attached share or git + // repo, as well as the Delta Sharing recipient type. + ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) +} + +func NewConsumerFulfillmentsPreview(client *client.DatabricksClient) *ConsumerFulfillmentsPreviewAPI { + return &ConsumerFulfillmentsPreviewAPI{ + consumerFulfillmentsPreviewImpl: consumerFulfillmentsPreviewImpl{ + client: client, + }, + } +} + +// Fulfillments are entities that allow consumers to preview installations. +type ConsumerFulfillmentsPreviewAPI struct { + consumerFulfillmentsPreviewImpl +} + +// Get listing content metadata. +// +// Get a high level preview of the metadata of listing installable content. +func (a *ConsumerFulfillmentsPreviewAPI) GetByListingId(ctx context.Context, listingId string) (*GetListingContentMetadataResponse, error) { + return a.consumerFulfillmentsPreviewImpl.internalGet(ctx, GetListingContentMetadataRequest{ + ListingId: listingId, + }) +} + +// List all listing fulfillments. +// +// Get all listings fulfillments associated with a listing. A _fulfillment_ is a +// potential installation. Standard installations contain metadata about the +// attached share or git repo. Only one of these fields will be present. +// Personalized installations contain metadata about the attached share or git +// repo, as well as the Delta Sharing recipient type. +func (a *ConsumerFulfillmentsPreviewAPI) ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) { + return a.consumerFulfillmentsPreviewImpl.internalList(ctx, ListFulfillmentsRequest{ + ListingId: listingId, + }) +} + +type ConsumerInstallationsPreviewInterface interface { + + // Install from a listing. + // + // Install payload associated with a Databricks Marketplace listing. + Create(ctx context.Context, request CreateInstallationRequest) (*Installation, error) + + // Uninstall from a listing. + // + // Uninstall an installation associated with a Databricks Marketplace listing. + Delete(ctx context.Context, request DeleteInstallationRequest) error + + // Uninstall from a listing. + // + // Uninstall an installation associated with a Databricks Marketplace listing. + DeleteByListingIdAndInstallationId(ctx context.Context, listingId string, installationId string) error + + // List all installations. + // + // List all installations across all listings. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] + + // List all installations. + // + // List all installations across all listings. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) + + // List installations for a listing. + // + // List all installations for a particular listing. + // + // This method is generated by Databricks SDK Code Generator. + ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] + + // List installations for a listing. + // + // List all installations for a particular listing. + // + // This method is generated by Databricks SDK Code Generator. 
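+ //
+ // A minimal usage sketch (not generated code; ctx and c are assumed to be
+ // a context.Context and a configured *client.DatabricksClient, and the
+ // listing ID is a placeholder):
+ //
+ //   installations, err := NewConsumerInstallationsPreview(c).
+ //       ListListingInstallationsAll(ctx, ListInstallationsRequest{ListingId: "<listing-id>"})
+ //   if err != nil {
+ //       return err
+ //   }
+ //   for _, installation := range installations {
+ //       fmt.Printf("%+v\n", installation)
+ //   }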
+ ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) + + // List installations for a listing. + // + // List all installations for a particular listing. + ListListingInstallationsByListingId(ctx context.Context, listingId string) (*ListInstallationsResponse, error) + + // Update an installation. + // + // This is an update API that updates the fields defined in the + // installation table and interacts with external services according to + // the fields not included in the installation table: 1. the token is rotated + // if the rotateToken flag is true; 2. the token is forcibly rotated if the + // rotateToken flag is true and the tokenInfo field is empty + Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) +} + +func NewConsumerInstallationsPreview(client *client.DatabricksClient) *ConsumerInstallationsPreviewAPI { + return &ConsumerInstallationsPreviewAPI{ + consumerInstallationsPreviewImpl: consumerInstallationsPreviewImpl{ + client: client, + }, + } +} + +// Installations are entities that allow consumers to interact with Databricks +// Marketplace listings. +type ConsumerInstallationsPreviewAPI struct { + consumerInstallationsPreviewImpl +} + +// Uninstall from a listing. +// +// Uninstall an installation associated with a Databricks Marketplace listing. +func (a *ConsumerInstallationsPreviewAPI) DeleteByListingIdAndInstallationId(ctx context.Context, listingId string, installationId string) error { + return a.consumerInstallationsPreviewImpl.Delete(ctx, DeleteInstallationRequest{ + ListingId: listingId, + InstallationId: installationId, + }) +} + +// List installations for a listing. +// +// List all installations for a particular listing. +func (a *ConsumerInstallationsPreviewAPI) ListListingInstallationsByListingId(ctx context.Context, listingId string) (*ListInstallationsResponse, error) { + return a.consumerInstallationsPreviewImpl.internalListListingInstallations(ctx, ListInstallationsRequest{ + ListingId: listingId, + }) +} + +type ConsumerListingsPreviewInterface interface { + + // Get one batch of listings. One may specify up to 50 IDs per request. + // + // Batch get a published listing in the Databricks Marketplace that the consumer + // has access to. + BatchGet(ctx context.Context, request BatchGetListingsRequest) (*BatchGetListingsResponse, error) + + // Get listing. + // + // Get a published listing in the Databricks Marketplace that the consumer has + // access to. + Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) + + // Get listing. + // + // Get a published listing in the Databricks Marketplace that the consumer has + // access to. + GetById(ctx context.Context, id string) (*GetListingResponse, error) + + // List listings. + // + // List all published listings in the Databricks Marketplace that the consumer + // has access to. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] + + // List listings. + // + // List all published listings in the Databricks Marketplace that the consumer + // has access to. + // + // This method is generated by Databricks SDK Code Generator. 
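+ //
+ // A minimal sketch (not generated code; ctx and c are assumed):
+ //
+ //   listings, err := NewConsumerListingsPreview(c).ListAll(ctx, ListListingsRequest{})
+ //   if err != nil {
+ //       return err
+ //   }
+ //   for _, l := range listings {
+ //       fmt.Println(l.Id)
+ //   }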
+ ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) + + // ListingSummaryNameToIdMap calls [ConsumerListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. + // + // Returns an error if there's more than one [Listing] with the same .Summary.Name. + // + // Note: All [Listing] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListingSummaryNameToIdMap(ctx context.Context, request ListListingsRequest) (map[string]string, error) + + // GetBySummaryName calls [ConsumerListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. + // + // Returns an error if there's more than one [Listing] with the same .Summary.Name. + // + // Note: All [Listing] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetBySummaryName(ctx context.Context, name string) (*Listing, error) + + // Search listings. + // + // Search published listings in the Databricks Marketplace that the consumer has + // access to. This query supports a variety of different search parameters and + // performs fuzzy matching. + // + // This method is generated by Databricks SDK Code Generator. + Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] + + // Search listings. + // + // Search published listings in the Databricks Marketplace that the consumer has + // access to. This query supports a variety of different search parameters and + // performs fuzzy matching. + // + // This method is generated by Databricks SDK Code Generator. + SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) +} + +func NewConsumerListingsPreview(client *client.DatabricksClient) *ConsumerListingsPreviewAPI { + return &ConsumerListingsPreviewAPI{ + consumerListingsPreviewImpl: consumerListingsPreviewImpl{ + client: client, + }, + } +} + +// Listings are the core entities in the Marketplace. They represent the +// products that are available for consumption. +type ConsumerListingsPreviewAPI struct { + consumerListingsPreviewImpl +} + +// Get listing. +// +// Get a published listing in the Databricks Marketplace that the consumer has +// access to. +func (a *ConsumerListingsPreviewAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { + return a.consumerListingsPreviewImpl.Get(ctx, GetListingRequest{ + Id: id, + }) +} + +// ListingSummaryNameToIdMap calls [ConsumerListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. +// +// Returns an error if there's more than one [Listing] with the same .Summary.Name. +// +// Note: All [Listing] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
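+//
+// For example (an illustrative sketch; ctx and c are assumed, and the summary
+// name is a placeholder):
+//
+//   ids, err := NewConsumerListingsPreview(c).ListingSummaryNameToIdMap(ctx, ListListingsRequest{})
+//   if err != nil {
+//       return err
+//   }
+//   fmt.Println(ids["<listing summary name>"])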
+func (a *ConsumerListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Context, request ListListingsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Summary.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Summary.Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetBySummaryName calls [ConsumerListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. +// +// Returns an error if there's more than one [Listing] with the same .Summary.Name. +// +// Note: All [Listing] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ConsumerListingsPreviewAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListListingsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Listing{} + for _, v := range result { + key := v.Summary.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Listing named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Listing named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ConsumerPersonalizationRequestsPreviewInterface interface { + + // Create a personalization request. + // + // Create a personalization request for a listing. + Create(ctx context.Context, request CreatePersonalizationRequest) (*CreatePersonalizationRequestResponse, error) + + // Get the personalization request for a listing. + // + // Get the personalization request for a listing. Each consumer can make at + // *most* one personalization request for a listing. + Get(ctx context.Context, request GetPersonalizationRequestRequest) (*GetPersonalizationRequestResponse, error) + + // Get the personalization request for a listing. + // + // Get the personalization request for a listing. Each consumer can make at + // *most* one personalization request for a listing. + GetByListingId(ctx context.Context, listingId string) (*GetPersonalizationRequestResponse, error) + + // List all personalization requests. + // + // List personalization requests for a consumer across all listings. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] + + // List all personalization requests. + // + // List personalization requests for a consumer across all listings. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) +} + +func NewConsumerPersonalizationRequestsPreview(client *client.DatabricksClient) *ConsumerPersonalizationRequestsPreviewAPI { + return &ConsumerPersonalizationRequestsPreviewAPI{ + consumerPersonalizationRequestsPreviewImpl: consumerPersonalizationRequestsPreviewImpl{ + client: client, + }, + } +} + +// Personalization Requests allow customers to interact with the individualized +// Marketplace listing flow. 
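+//
+// A usage sketch (not generated code; ctx and c are assumed, and the listing
+// ID is a placeholder):
+//
+//   resp, err := NewConsumerPersonalizationRequestsPreview(c).GetByListingId(ctx, "<listing-id>")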
+type ConsumerPersonalizationRequestsPreviewAPI struct { + consumerPersonalizationRequestsPreviewImpl +} + +// Get the personalization request for a listing. +// +// Get the personalization request for a listing. Each consumer can make at +// *most* one personalization request for a listing. +func (a *ConsumerPersonalizationRequestsPreviewAPI) GetByListingId(ctx context.Context, listingId string) (*GetPersonalizationRequestResponse, error) { + return a.consumerPersonalizationRequestsPreviewImpl.Get(ctx, GetPersonalizationRequestRequest{ + ListingId: listingId, + }) +} + +type ConsumerProvidersPreviewInterface interface { + + // Get one batch of providers. One may specify up to 50 IDs per request. + // + // Batch get a provider in the Databricks Marketplace with at least one visible + // listing. + BatchGet(ctx context.Context, request BatchGetProvidersRequest) (*BatchGetProvidersResponse, error) + + // Get a provider. + // + // Get a provider in the Databricks Marketplace with at least one visible + // listing. + Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) + + // Get a provider. + // + // Get a provider in the Databricks Marketplace with at least one visible + // listing. + GetById(ctx context.Context, id string) (*GetProviderResponse, error) + + // List providers. + // + // List all providers in the Databricks Marketplace with at least one visible + // listing. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] + + // List providers. + // + // List all providers in the Databricks Marketplace with at least one visible + // listing. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) + + // ProviderInfoNameToIdMap calls [ConsumerProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. + // + // Returns an error if there's more than one [ProviderInfo] with the same .Name. + // + // Note: All [ProviderInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) + + // GetByName calls [ConsumerProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. + // + // Returns an error if there's more than one [ProviderInfo] with the same .Name. + // + // Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*ProviderInfo, error) +} + +func NewConsumerProvidersPreview(client *client.DatabricksClient) *ConsumerProvidersPreviewAPI { + return &ConsumerProvidersPreviewAPI{ + consumerProvidersPreviewImpl: consumerProvidersPreviewImpl{ + client: client, + }, + } +} + +// Providers are the entities that publish listings to the Marketplace. +type ConsumerProvidersPreviewAPI struct { + consumerProvidersPreviewImpl +} + +// Get a provider. +// +// Get a provider in the Databricks Marketplace with at least one visible +// listing. 
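+//
+// For example (illustrative; ctx and c are assumed, and the provider ID is a
+// placeholder):
+//
+//   provider, err := NewConsumerProvidersPreview(c).GetById(ctx, "<provider-id>")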
+func (a *ConsumerProvidersPreviewAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { + return a.consumerProvidersPreviewImpl.Get(ctx, GetProviderRequest{ + Id: id, + }) +} + +// ProviderInfoNameToIdMap calls [ConsumerProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. +// +// Returns an error if there's more than one [ProviderInfo] with the same .Name. +// +// Note: All [ProviderInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ConsumerProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [ConsumerProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. +// +// Returns an error if there's more than one [ProviderInfo] with the same .Name. +// +// Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ConsumerProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListProvidersRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ProviderInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ProviderInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ProviderInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ProviderExchangeFiltersPreviewInterface interface { + + // Create a new exchange filter. + // + // Add an exchange filter. + Create(ctx context.Context, request CreateExchangeFilterRequest) (*CreateExchangeFilterResponse, error) + + // Delete an exchange filter. + // + // Delete an exchange filter + Delete(ctx context.Context, request DeleteExchangeFilterRequest) error + + // Delete an exchange filter. + // + // Delete an exchange filter + DeleteById(ctx context.Context, id string) error + + // List exchange filters. + // + // List exchange filters + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] + + // List exchange filters. + // + // List exchange filters + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) + + // ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersPreviewAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. + // + // Returns an error if there's more than one [ExchangeFilter] with the same .Name. + // + // Note: All [ExchangeFilter] instances are loaded into memory before creating a map. 
+ // + // This method is generated by Databricks SDK Code Generator. + ExchangeFilterNameToIdMap(ctx context.Context, request ListExchangeFiltersRequest) (map[string]string, error) + + // GetByName calls [ProviderExchangeFiltersPreviewAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. + // + // Returns an error if there's more than one [ExchangeFilter] with the same .Name. + // + // Note: All [ExchangeFilter] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*ExchangeFilter, error) + + // Update exchange filter. + // + // Update an exchange filter. + Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) +} + +func NewProviderExchangeFiltersPreview(client *client.DatabricksClient) *ProviderExchangeFiltersPreviewAPI { + return &ProviderExchangeFiltersPreviewAPI{ + providerExchangeFiltersPreviewImpl: providerExchangeFiltersPreviewImpl{ + client: client, + }, + } +} + +// Marketplace exchange filters curate which groups can access an exchange. +type ProviderExchangeFiltersPreviewAPI struct { + providerExchangeFiltersPreviewImpl +} + +// Delete an exchange filter. +// +// Delete an exchange filter +func (a *ProviderExchangeFiltersPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.providerExchangeFiltersPreviewImpl.Delete(ctx, DeleteExchangeFilterRequest{ + Id: id, + }) +} + +// ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersPreviewAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. +// +// Returns an error if there's more than one [ExchangeFilter] with the same .Name. +// +// Note: All [ExchangeFilter] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangeFiltersPreviewAPI) ExchangeFilterNameToIdMap(ctx context.Context, request ListExchangeFiltersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [ProviderExchangeFiltersPreviewAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. +// +// Returns an error if there's more than one [ExchangeFilter] with the same .Name. +// +// Note: All [ExchangeFilter] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
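+//
+// For example (illustrative; ctx and c are assumed, and the filter name is a
+// placeholder), resolving an exchange filter by display name:
+//
+//   filter, err := NewProviderExchangeFiltersPreview(c).GetByName(ctx, "<filter-name>")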
+func (a *ProviderExchangeFiltersPreviewAPI) GetByName(ctx context.Context, name string) (*ExchangeFilter, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListExchangeFiltersRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ExchangeFilter{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ExchangeFilter named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ExchangeFilter named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ProviderExchangesPreviewInterface interface { + + // Add an exchange for listing. + // + // Associate an exchange with a listing + AddListingToExchange(ctx context.Context, request AddExchangeForListingRequest) (*AddExchangeForListingResponse, error) + + // Create an exchange. + // + // Create an exchange + Create(ctx context.Context, request CreateExchangeRequest) (*CreateExchangeResponse, error) + + // Delete an exchange. + // + // This removes a listing from the marketplace. + Delete(ctx context.Context, request DeleteExchangeRequest) error + + // Delete an exchange. + // + // This removes a listing from the marketplace. + DeleteById(ctx context.Context, id string) error + + // Remove an exchange for listing. + // + // Disassociate an exchange from a listing + DeleteListingFromExchange(ctx context.Context, request RemoveExchangeForListingRequest) error + + // Remove an exchange for listing. + // + // Disassociate an exchange from a listing + DeleteListingFromExchangeById(ctx context.Context, id string) error + + // Get an exchange. + // + // Get an exchange. + Get(ctx context.Context, request GetExchangeRequest) (*GetExchangeResponse, error) + + // Get an exchange. + // + // Get an exchange. + GetById(ctx context.Context, id string) (*GetExchangeResponse, error) + + // List exchanges. + // + // List exchanges visible to the provider + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] + + // List exchanges. + // + // List exchanges visible to the provider + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) + + // ExchangeNameToIdMap calls [ProviderExchangesPreviewAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. + // + // Returns an error if there's more than one [Exchange] with the same .Name. + // + // Note: All [Exchange] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ExchangeNameToIdMap(ctx context.Context, request ListExchangesRequest) (map[string]string, error) + + // GetByName calls [ProviderExchangesPreviewAPI.ExchangeNameToIdMap] and returns a single [Exchange]. + // + // Returns an error if there's more than one [Exchange] with the same .Name. + // + // Note: All [Exchange] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*Exchange, error) + + // List exchanges for listing. + // + // List exchanges associated with a listing + // + // This method is generated by Databricks SDK Code Generator. 
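+ //
+ // Iterating lazily with the returned iterator (a sketch, not generated
+ // code; ctx and c are assumed, and the `ListingId` field of
+ // [ListExchangesForListingRequest] is an assumption based on the request
+ // type's name):
+ //
+ //   it := NewProviderExchangesPreview(c).ListExchangesForListing(ctx,
+ //       ListExchangesForListingRequest{ListingId: "<listing-id>"})
+ //   for it.HasNext(ctx) {
+ //       el, err := it.Next(ctx)
+ //       if err != nil {
+ //           return err
+ //       }
+ //       fmt.Println(el.ExchangeName)
+ //   }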
+ ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] + + // List exchanges for listing. + // + // List exchanges associated with a listing + // + // This method is generated by Databricks SDK Code Generator. + ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) + + // ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesPreviewAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. + // + // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. + // + // Note: All [ExchangeListing] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ExchangeListingExchangeNameToExchangeIdMap(ctx context.Context, request ListExchangesForListingRequest) (map[string]string, error) + + // GetByExchangeName calls [ProviderExchangesPreviewAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. + // + // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. + // + // Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByExchangeName(ctx context.Context, name string) (*ExchangeListing, error) + + // List listings for exchange. + // + // List listings associated with an exchange + // + // This method is generated by Databricks SDK Code Generator. + ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] + + // List listings for exchange. + // + // List listings associated with an exchange + // + // This method is generated by Databricks SDK Code Generator. + ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) + + // ExchangeListingListingNameToListingIdMap calls [ProviderExchangesPreviewAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. + // + // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. + // + // Note: All [ExchangeListing] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ExchangeListingListingNameToListingIdMap(ctx context.Context, request ListListingsForExchangeRequest) (map[string]string, error) + + // GetByListingName calls [ProviderExchangesPreviewAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. + // + // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. + // + // Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByListingName(ctx context.Context, name string) (*ExchangeListing, error) + + // Update exchange. 
+ // + // Update an exchange + Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) +} + +func NewProviderExchangesPreview(client *client.DatabricksClient) *ProviderExchangesPreviewAPI { + return &ProviderExchangesPreviewAPI{ + providerExchangesPreviewImpl: providerExchangesPreviewImpl{ + client: client, + }, + } +} + +// Marketplace exchanges allow providers to share their listings with a curated +// set of customers. +type ProviderExchangesPreviewAPI struct { + providerExchangesPreviewImpl +} + +// Delete an exchange. +// +// This removes an exchange from the marketplace. +func (a *ProviderExchangesPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.providerExchangesPreviewImpl.Delete(ctx, DeleteExchangeRequest{ + Id: id, + }) +} + +// Remove an exchange for listing. +// +// Disassociate an exchange from a listing +func (a *ProviderExchangesPreviewAPI) DeleteListingFromExchangeById(ctx context.Context, id string) error { + return a.providerExchangesPreviewImpl.DeleteListingFromExchange(ctx, RemoveExchangeForListingRequest{ + Id: id, + }) +} + +// Get an exchange. +// +// Get an exchange. +func (a *ProviderExchangesPreviewAPI) GetById(ctx context.Context, id string) (*GetExchangeResponse, error) { + return a.providerExchangesPreviewImpl.Get(ctx, GetExchangeRequest{ + Id: id, + }) +} + +// ExchangeNameToIdMap calls [ProviderExchangesPreviewAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. +// +// Returns an error if there's more than one [Exchange] with the same .Name. +// +// Note: All [Exchange] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangesPreviewAPI) ExchangeNameToIdMap(ctx context.Context, request ListExchangesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [ProviderExchangesPreviewAPI.ExchangeNameToIdMap] and returns a single [Exchange]. +// +// Returns an error if there's more than one [Exchange] with the same .Name. +// +// Note: All [Exchange] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangesPreviewAPI) GetByName(ctx context.Context, name string) (*Exchange, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListExchangesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Exchange{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Exchange named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Exchange named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesPreviewAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value.
+// +// Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. +// +// Note: All [ExchangeListing] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangesPreviewAPI) ExchangeListingExchangeNameToExchangeIdMap(ctx context.Context, request ListExchangesForListingRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListExchangesForListingAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.ExchangeName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .ExchangeName: %s", key) + } + mapping[key] = v.ExchangeId + } + return mapping, nil +} + +// GetByExchangeName calls [ProviderExchangesPreviewAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. +// +// Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. +// +// Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangesPreviewAPI) GetByExchangeName(ctx context.Context, name string) (*ExchangeListing, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListExchangesForListingAll(ctx, ListExchangesForListingRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ExchangeListing{} + for _, v := range result { + key := v.ExchangeName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ExchangeListing named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ExchangeListing named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// ExchangeListingListingNameToListingIdMap calls [ProviderExchangesPreviewAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. +// +// Returns an error if there's more than one [ExchangeListing] with the same .ListingName. +// +// Note: All [ExchangeListing] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderExchangesPreviewAPI) ExchangeListingListingNameToListingIdMap(ctx context.Context, request ListListingsForExchangeRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListListingsForExchangeAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.ListingName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .ListingName: %s", key) + } + mapping[key] = v.ListingId + } + return mapping, nil +} + +// GetByListingName calls [ProviderExchangesPreviewAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. +// +// Returns an error if there's more than one [ExchangeListing] with the same .ListingName. +// +// Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
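+//
+// Example usage (editor's sketch; the listing name "Weather History" is
+// hypothetical):
+//
+//	el, err := a.GetByListingName(ctx, "Weather History")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(el.ExchangeId, el.ListingId)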
+func (a *ProviderExchangesPreviewAPI) GetByListingName(ctx context.Context, name string) (*ExchangeListing, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListListingsForExchangeAll(ctx, ListListingsForExchangeRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ExchangeListing{} + for _, v := range result { + key := v.ListingName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ExchangeListing named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ExchangeListing named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ProviderFilesPreviewInterface interface { + + // Create a file. + // + // Create a file. Currently, only provider icons and attached notebooks are + // supported. + Create(ctx context.Context, request CreateFileRequest) (*CreateFileResponse, error) + + // Delete a file. + // + // Delete a file + Delete(ctx context.Context, request DeleteFileRequest) error + + // Delete a file. + // + // Delete a file + DeleteByFileId(ctx context.Context, fileId string) error + + // Get a file. + // + // Get a file + Get(ctx context.Context, request GetFileRequest) (*GetFileResponse, error) + + // Get a file. + // + // Get a file + GetByFileId(ctx context.Context, fileId string) (*GetFileResponse, error) + + // List files. + // + // List files attached to a parent entity. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] + + // List files. + // + // List files attached to a parent entity. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) + + // FileInfoDisplayNameToIdMap calls [ProviderFilesPreviewAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. + // + // Returns an error if there's more than one [FileInfo] with the same .DisplayName. + // + // Note: All [FileInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + FileInfoDisplayNameToIdMap(ctx context.Context, request ListFilesRequest) (map[string]string, error) + + // GetByDisplayName calls [ProviderFilesPreviewAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. + // + // Returns an error if there's more than one [FileInfo] with the same .DisplayName. + // + // Note: All [FileInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*FileInfo, error) +} + +func NewProviderFilesPreview(client *client.DatabricksClient) *ProviderFilesPreviewAPI { + return &ProviderFilesPreviewAPI{ + providerFilesPreviewImpl: providerFilesPreviewImpl{ + client: client, + }, + } +} + +// Marketplace offers a set of file APIs for various purposes such as preview +// notebooks and provider icons. +type ProviderFilesPreviewAPI struct { + providerFilesPreviewImpl +} + +// Delete a file. +// +// Delete a file +func (a *ProviderFilesPreviewAPI) DeleteByFileId(ctx context.Context, fileId string) error { + return a.providerFilesPreviewImpl.Delete(ctx, DeleteFileRequest{ + FileId: fileId, + }) +} + +// Get a file. 
+// +// Get a file +func (a *ProviderFilesPreviewAPI) GetByFileId(ctx context.Context, fileId string) (*GetFileResponse, error) { + return a.providerFilesPreviewImpl.Get(ctx, GetFileRequest{ + FileId: fileId, + }) +} + +// FileInfoDisplayNameToIdMap calls [ProviderFilesPreviewAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. +// +// Returns an error if there's more than one [FileInfo] with the same .DisplayName. +// +// Note: All [FileInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderFilesPreviewAPI) FileInfoDisplayNameToIdMap(ctx context.Context, request ListFilesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [ProviderFilesPreviewAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. +// +// Returns an error if there's more than one [FileInfo] with the same .DisplayName. +// +// Note: All [FileInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderFilesPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*FileInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListFilesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]FileInfo{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("FileInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of FileInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ProviderListingsPreviewInterface interface { + + // Create a listing. + // + // Create a new listing + Create(ctx context.Context, request CreateListingRequest) (*CreateListingResponse, error) + + // Delete a listing. + // + // Delete a listing + Delete(ctx context.Context, request DeleteListingRequest) error + + // Delete a listing. + // + // Delete a listing + DeleteById(ctx context.Context, id string) error + + // Get a listing. + // + // Get a listing + Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) + + // Get a listing. + // + // Get a listing + GetById(ctx context.Context, id string) (*GetListingResponse, error) + + // List listings. + // + // List listings owned by this provider + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] + + // List listings. + // + // List listings owned by this provider + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) + + // ListingSummaryNameToIdMap calls [ProviderListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. 
+ // + // Returns an error if there's more than one [Listing] with the same .Summary.Name. + // + // Note: All [Listing] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListingSummaryNameToIdMap(ctx context.Context, request GetListingsRequest) (map[string]string, error) + + // GetBySummaryName calls [ProviderListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. + // + // Returns an error if there's more than one [Listing] with the same .Summary.Name. + // + // Note: All [Listing] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetBySummaryName(ctx context.Context, name string) (*Listing, error) + + // Update listing. + // + // Update a listing + Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) +} + +func NewProviderListingsPreview(client *client.DatabricksClient) *ProviderListingsPreviewAPI { + return &ProviderListingsPreviewAPI{ + providerListingsPreviewImpl: providerListingsPreviewImpl{ + client: client, + }, + } +} + +// Listings are the core entities in the Marketplace. They represent the +// products that are available for consumption. +type ProviderListingsPreviewAPI struct { + providerListingsPreviewImpl +} + +// Delete a listing. +// +// Delete a listing +func (a *ProviderListingsPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.providerListingsPreviewImpl.Delete(ctx, DeleteListingRequest{ + Id: id, + }) +} + +// Get a listing. +// +// Get a listing +func (a *ProviderListingsPreviewAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { + return a.providerListingsPreviewImpl.Get(ctx, GetListingRequest{ + Id: id, + }) +} + +// ListingSummaryNameToIdMap calls [ProviderListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. +// +// Returns an error if there's more than one [Listing] with the same .Summary.Name. +// +// Note: All [Listing] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Context, request GetListingsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Summary.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Summary.Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetBySummaryName calls [ProviderListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. +// +// Returns an error if there's more than one [Listing] with the same .Summary.Name. +// +// Note: All [Listing] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
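+//
+// Example usage (editor's sketch; the summary name "NYC Taxi Trips" is
+// hypothetical):
+//
+//	l, err := a.GetBySummaryName(ctx, "NYC Taxi Trips")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(l.Id)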
+func (a *ProviderListingsPreviewAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, GetListingsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Listing{} + for _, v := range result { + key := v.Summary.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Listing named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Listing named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ProviderPersonalizationRequestsPreviewInterface interface { + + // All personalization requests across all listings. + // + // List personalization requests to this provider. This will return all + // personalization requests, regardless of which listing they are for. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] + + // All personalization requests across all listings. + // + // List personalization requests to this provider. This will return all + // personalization requests, regardless of which listing they are for. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) + + // Update personalization request status. + // + // Update personalization request. This method only permits updating the status + // of the request. + Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) +} + +func NewProviderPersonalizationRequestsPreview(client *client.DatabricksClient) *ProviderPersonalizationRequestsPreviewAPI { + return &ProviderPersonalizationRequestsPreviewAPI{ + providerPersonalizationRequestsPreviewImpl: providerPersonalizationRequestsPreviewImpl{ + client: client, + }, + } +} + +// Personalization requests are an alternative to instantly available listings. +// Control the lifecycle of personalized solutions. +type ProviderPersonalizationRequestsPreviewAPI struct { + providerPersonalizationRequestsPreviewImpl +} + +type ProviderProviderAnalyticsDashboardsPreviewInterface interface { + + // Create provider analytics dashboard. + // + // Create provider analytics dashboard. Returns a Marketplace-specific `id`. Not + // to be confused with the Lakeview dashboard id. + Create(ctx context.Context) (*ProviderAnalyticsDashboard, error) + + // Get provider analytics dashboard. + // + // Get provider analytics dashboard. + Get(ctx context.Context) (*ListProviderAnalyticsDashboardResponse, error) + + // Get latest version of provider analytics dashboard. + // + // Get latest version of provider analytics dashboard. + GetLatestVersion(ctx context.Context) (*GetLatestVersionProviderAnalyticsDashboardResponse, error) + + // Update provider analytics dashboard. + // + // Update provider analytics dashboard.
+ Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) +} + +func NewProviderProviderAnalyticsDashboardsPreview(client *client.DatabricksClient) *ProviderProviderAnalyticsDashboardsPreviewAPI { + return &ProviderProviderAnalyticsDashboardsPreviewAPI{ + providerProviderAnalyticsDashboardsPreviewImpl: providerProviderAnalyticsDashboardsPreviewImpl{ + client: client, + }, + } +} + +// Manage templated analytics solution for providers. +type ProviderProviderAnalyticsDashboardsPreviewAPI struct { + providerProviderAnalyticsDashboardsPreviewImpl +} + +type ProviderProvidersPreviewInterface interface { + + // Create a provider. + // + // Create a provider + Create(ctx context.Context, request CreateProviderRequest) (*CreateProviderResponse, error) + + // Delete provider. + // + // Delete provider + Delete(ctx context.Context, request DeleteProviderRequest) error + + // Delete provider. + // + // Delete provider + DeleteById(ctx context.Context, id string) error + + // Get provider. + // + // Get provider profile + Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) + + // Get provider. + // + // Get provider profile + GetById(ctx context.Context, id string) (*GetProviderResponse, error) + + // List providers. + // + // List provider profiles for account. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] + + // List providers. + // + // List provider profiles for account. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) + + // ProviderInfoNameToIdMap calls [ProviderProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. + // + // Returns an error if there's more than one [ProviderInfo] with the same .Name. + // + // Note: All [ProviderInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) + + // GetByName calls [ProviderProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. + // + // Returns an error if there's more than one [ProviderInfo] with the same .Name. + // + // Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*ProviderInfo, error) + + // Update provider. + // + // Update provider profile + Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) +} + +func NewProviderProvidersPreview(client *client.DatabricksClient) *ProviderProvidersPreviewAPI { + return &ProviderProvidersPreviewAPI{ + providerProvidersPreviewImpl: providerProvidersPreviewImpl{ + client: client, + }, + } +} + +// Providers are entities that manage assets in Marketplace. +type ProviderProvidersPreviewAPI struct { + providerProvidersPreviewImpl +} + +// Delete provider. +// +// Delete provider +func (a *ProviderProvidersPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.providerProvidersPreviewImpl.Delete(ctx, DeleteProviderRequest{ + Id: id, + }) +} + +// Get provider. 
+// +// Get provider profile +func (a *ProviderProvidersPreviewAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { + return a.providerProvidersPreviewImpl.Get(ctx, GetProviderRequest{ + Id: id, + }) +} + +// ProviderInfoNameToIdMap calls [ProviderProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. +// +// Returns an error if there's more than one [ProviderInfo] with the same .Name. +// +// Note: All [ProviderInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [ProviderProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. +// +// Returns an error if there's more than one [ProviderInfo] with the same .Name. +// +// Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ProviderProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListProvidersRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ProviderInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ProviderInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ProviderInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} diff --git a/marketplace/v2preview/client.go b/marketplace/v2preview/client.go new file mode 100755 index 000000000..4c7590fdb --- /dev/null +++ b/marketplace/v2preview/client.go @@ -0,0 +1,419 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
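+//
+// Editor's note (not generated): every *PreviewClient in this file follows the
+// same construction pattern. Passing a nil config resolves credentials from the
+// environment (for example DATABRICKS_HOST and DATABRICKS_TOKEN), and
+// account-level configs are rejected because these are workspace-level
+// services:
+//
+//	c, err := NewConsumerListingsPreviewClient(nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}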
+ +package marketplacepreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type ConsumerFulfillmentsPreviewClient struct { + ConsumerFulfillmentsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConsumerFulfillmentsPreviewClient(cfg *config.Config) (*ConsumerFulfillmentsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConsumerFulfillmentsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConsumerFulfillmentsPreviewInterface: NewConsumerFulfillmentsPreview(databricksClient), + }, nil +} + +type ConsumerInstallationsPreviewClient struct { + ConsumerInstallationsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConsumerInstallationsPreviewClient(cfg *config.Config) (*ConsumerInstallationsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConsumerInstallationsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConsumerInstallationsPreviewInterface: NewConsumerInstallationsPreview(databricksClient), + }, nil +} + +type ConsumerListingsPreviewClient struct { + ConsumerListingsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConsumerListingsPreviewClient(cfg *config.Config) (*ConsumerListingsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConsumerListingsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConsumerListingsPreviewInterface: NewConsumerListingsPreview(databricksClient), + }, nil +} + +type ConsumerPersonalizationRequestsPreviewClient struct { + ConsumerPersonalizationRequestsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConsumerPersonalizationRequestsPreviewClient(cfg *config.Config) (*ConsumerPersonalizationRequestsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace 
service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConsumerPersonalizationRequestsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConsumerPersonalizationRequestsPreviewInterface: NewConsumerPersonalizationRequestsPreview(databricksClient), + }, nil +} + +type ConsumerProvidersPreviewClient struct { + ConsumerProvidersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewConsumerProvidersPreviewClient(cfg *config.Config) (*ConsumerProvidersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ConsumerProvidersPreviewClient{ + Config: cfg, + apiClient: apiClient, + ConsumerProvidersPreviewInterface: NewConsumerProvidersPreview(databricksClient), + }, nil +} + +type ProviderExchangeFiltersPreviewClient struct { + ProviderExchangeFiltersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderExchangeFiltersPreviewClient(cfg *config.Config) (*ProviderExchangeFiltersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderExchangeFiltersPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderExchangeFiltersPreviewInterface: NewProviderExchangeFiltersPreview(databricksClient), + }, nil +} + +type ProviderExchangesPreviewClient struct { + ProviderExchangesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderExchangesPreviewClient(cfg *config.Config) (*ProviderExchangesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderExchangesPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderExchangesPreviewInterface: NewProviderExchangesPreview(databricksClient), + }, nil +} + +type ProviderFilesPreviewClient struct { + ProviderFilesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderFilesPreviewClient(cfg *config.Config) (*ProviderFilesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return 
nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderFilesPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderFilesPreviewInterface: NewProviderFilesPreview(databricksClient), + }, nil +} + +type ProviderListingsPreviewClient struct { + ProviderListingsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderListingsPreviewClient(cfg *config.Config) (*ProviderListingsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderListingsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderListingsPreviewInterface: NewProviderListingsPreview(databricksClient), + }, nil +} + +type ProviderPersonalizationRequestsPreviewClient struct { + ProviderPersonalizationRequestsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderPersonalizationRequestsPreviewClient(cfg *config.Config) (*ProviderPersonalizationRequestsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderPersonalizationRequestsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderPersonalizationRequestsPreviewInterface: NewProviderPersonalizationRequestsPreview(databricksClient), + }, nil +} + +type ProviderProviderAnalyticsDashboardsPreviewClient struct { + ProviderProviderAnalyticsDashboardsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProviderProviderAnalyticsDashboardsPreviewClient(cfg *config.Config) (*ProviderProviderAnalyticsDashboardsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderProviderAnalyticsDashboardsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderProviderAnalyticsDashboardsPreviewInterface: NewProviderProviderAnalyticsDashboardsPreview(databricksClient), + }, nil +} + +type ProviderProvidersPreviewClient struct { + ProviderProvidersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func 
NewProviderProvidersPreviewClient(cfg *config.Config) (*ProviderProvidersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProviderProvidersPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProviderProvidersPreviewInterface: NewProviderProvidersPreview(databricksClient), + }, nil +} diff --git a/marketplace/v2preview/impl.go b/marketplace/v2preview/impl.go new file mode 100755 index 000000000..150e929c3 --- /dev/null +++ b/marketplace/v2preview/impl.go @@ -0,0 +1,1150 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package marketplacepreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just ConsumerFulfillmentsPreview API methods +type consumerFulfillmentsPreviewImpl struct { + client *client.DatabricksClient +} + +// Get listing content metadata. +// +// Get a high level preview of the metadata of listing installable content. +func (a *consumerFulfillmentsPreviewImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] { + + getNextPage := func(ctx context.Context, req GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGet(ctx, req) + } + getItems := func(resp *GetListingContentMetadataResponse) []SharedDataObject { + return resp.SharedDataObjects + } + getNextReq := func(resp *GetListingContentMetadataResponse) *GetListingContentMetadataRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get listing content metadata. +// +// Get a high level preview of the metadata of listing installable content. +func (a *consumerFulfillmentsPreviewImpl) GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) { + iterator := a.Get(ctx, request) + return listing.ToSlice[SharedDataObject](ctx, iterator) +} +func (a *consumerFulfillmentsPreviewImpl) internalGet(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { + var getListingContentMetadataResponse GetListingContentMetadataResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/content", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingContentMetadataResponse) + return &getListingContentMetadataResponse, err +} + +// List all listing fulfillments. +// +// Get all listing fulfillments associated with a listing. A _fulfillment_ is a
potential installation. Standard installations contain metadata about the +// attached share or git repo. Only one of these fields will be present. +// Personalized installations contain metadata about the attached share or git +// repo, as well as the Delta Sharing recipient type. +func (a *consumerFulfillmentsPreviewImpl) List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] { + + getNextPage := func(ctx context.Context, req ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFulfillmentsResponse) []ListingFulfillment { + return resp.Fulfillments + } + getNextReq := func(resp *ListFulfillmentsResponse) *ListFulfillmentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all listing fulfillments. +// +// Get all listing fulfillments associated with a listing. A _fulfillment_ is a +// potential installation. Standard installations contain metadata about the +// attached share or git repo. Only one of these fields will be present. +// Personalized installations contain metadata about the attached share or git +// repo, as well as the Delta Sharing recipient type. +func (a *consumerFulfillmentsPreviewImpl) ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListingFulfillment](ctx, iterator) +} +func (a *consumerFulfillmentsPreviewImpl) internalList(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { + var listFulfillmentsResponse ListFulfillmentsResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/fulfillments", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFulfillmentsResponse) + return &listFulfillmentsResponse, err +} + +// unexported type that holds implementations of just ConsumerInstallationsPreview API methods +type consumerInstallationsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *consumerInstallationsPreviewImpl) Create(ctx context.Context, request CreateInstallationRequest) (*Installation, error) { + var installation Installation + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &installation) + return &installation, err +} + +func (a *consumerInstallationsPreviewImpl) Delete(ctx context.Context, request DeleteInstallationRequest) error { + var deleteInstallationResponse DeleteInstallationResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteInstallationResponse) + return err +} + +// List all
installations. +// +// List all installations across all listings. +func (a *consumerInstallationsPreviewImpl) List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] { + + getNextPage := func(ctx context.Context, req ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllInstallationsResponse) []InstallationDetail { + return resp.Installations + } + getNextReq := func(resp *ListAllInstallationsResponse) *ListAllInstallationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all installations. +// +// List all installations across all listings. +func (a *consumerInstallationsPreviewImpl) ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[InstallationDetail](ctx, iterator) +} +func (a *consumerInstallationsPreviewImpl) internalList(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { + var listAllInstallationsResponse ListAllInstallationsResponse + path := "/api/2.1preview/marketplace-consumer/installations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllInstallationsResponse) + return &listAllInstallationsResponse, err +} + +// List installations for a listing. +// +// List all installations for a particular listing. +func (a *consumerInstallationsPreviewImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] { + + getNextPage := func(ctx context.Context, req ListInstallationsRequest) (*ListInstallationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListListingInstallations(ctx, req) + } + getItems := func(resp *ListInstallationsResponse) []InstallationDetail { + return resp.Installations + } + getNextReq := func(resp *ListInstallationsResponse) *ListInstallationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List installations for a listing. +// +// List all installations for a particular listing. 
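+//
+// Example usage (editor's sketch; `c` is a hypothetical
+// ConsumerInstallationsPreviewClient and the listing ID "lst-123" is made up;
+// note that the *All helpers buffer every page in memory):
+//
+//	installs, err := c.ListListingInstallationsAll(ctx, ListInstallationsRequest{ListingId: "lst-123"})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(installs))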
+func (a *consumerInstallationsPreviewImpl) ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) { + iterator := a.ListListingInstallations(ctx, request) + return listing.ToSlice[InstallationDetail](ctx, iterator) +} +func (a *consumerInstallationsPreviewImpl) internalListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { + var listInstallationsResponse ListInstallationsResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listInstallationsResponse) + return &listInstallationsResponse, err +} + +func (a *consumerInstallationsPreviewImpl) Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) { + var updateInstallationResponse UpdateInstallationResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateInstallationResponse) + return &updateInstallationResponse, err +} + +// unexported type that holds implementations of just ConsumerListingsPreview API methods +type consumerListingsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *consumerListingsPreviewImpl) BatchGet(ctx context.Context, request BatchGetListingsRequest) (*BatchGetListingsResponse, error) { + var batchGetListingsResponse BatchGetListingsResponse + path := "/api/2.1preview/marketplace-consumer/listings:batchGet" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &batchGetListingsResponse) + return &batchGetListingsResponse, err +} + +func (a *consumerListingsPreviewImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { + var getListingResponse GetListingResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingResponse) + return &getListingResponse, err +} + +// List listings. +// +// List all published listings in the Databricks Marketplace that the consumer +// has access to. 
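+//
+// Example usage (editor's sketch; `c` is a hypothetical
+// ConsumerListingsPreviewClient; the iterator fetches pages lazily, following
+// NextPageToken until it is empty):
+//
+//	it := c.List(ctx, ListListingsRequest{})
+//	for it.HasNext(ctx) {
+//		l, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(l.Summary.Name)
+//	}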
+func (a *consumerListingsPreviewImpl) List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req ListListingsRequest) (*ListListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *ListListingsResponse) *ListListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings. +// +// List all published listings in the Databricks Marketplace that the consumer +// has access to. +func (a *consumerListingsPreviewImpl) ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} +func (a *consumerListingsPreviewImpl) internalList(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { + var listListingsResponse ListListingsResponse + path := "/api/2.1preview/marketplace-consumer/listings" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listListingsResponse) + return &listListingsResponse, err +} + +// Search listings. +// +// Search published listings in the Databricks Marketplace that the consumer has +// access to. This query supports a variety of different search parameters and +// performs fuzzy matching. +func (a *consumerListingsPreviewImpl) Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req SearchListingsRequest) (*SearchListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearch(ctx, req) + } + getItems := func(resp *SearchListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *SearchListingsResponse) *SearchListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search listings. +// +// Search published listings in the Databricks Marketplace that the consumer has +// access to. This query supports a variety of different search parameters and +// performs fuzzy matching. 
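+//
+// Example usage (editor's sketch; `c` is a hypothetical
+// ConsumerListingsPreviewClient and the Query field name is assumed from the
+// generated model.go, which is not shown here):
+//
+//	hits, err := c.SearchAll(ctx, SearchListingsRequest{Query: "weather"})
+//	if err != nil {
+//		return err
+//	}
+//	for _, l := range hits {
+//		fmt.Println(l.Summary.Name)
+//	}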
+func (a *consumerListingsPreviewImpl) SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) { + iterator := a.Search(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} +func (a *consumerListingsPreviewImpl) internalSearch(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { + var searchListingsResponse SearchListingsResponse + path := "/api/2.1preview/marketplace-consumer/search-listings" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchListingsResponse) + return &searchListingsResponse, err +} + +// unexported type that holds implementations of just ConsumerPersonalizationRequestsPreview API methods +type consumerPersonalizationRequestsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *consumerPersonalizationRequestsPreviewImpl) Create(ctx context.Context, request CreatePersonalizationRequest) (*CreatePersonalizationRequestResponse, error) { + var createPersonalizationRequestResponse CreatePersonalizationRequestResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPersonalizationRequestResponse) + return &createPersonalizationRequestResponse, err +} + +func (a *consumerPersonalizationRequestsPreviewImpl) Get(ctx context.Context, request GetPersonalizationRequestRequest) (*GetPersonalizationRequestResponse, error) { + var getPersonalizationRequestResponse GetPersonalizationRequestResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPersonalizationRequestResponse) + return &getPersonalizationRequestResponse, err +} + +// List all personalization requests. +// +// List personalization requests for a consumer across all listings. +func (a *consumerPersonalizationRequestsPreviewImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { + + getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { + return resp.PersonalizationRequests + } + getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all personalization requests. +// +// List personalization requests for a consumer across all listings. 
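+//
+// Example usage (editor's sketch; `c` is a hypothetical
+// ConsumerPersonalizationRequestsPreviewClient; like every *All helper, the
+// full result set is held in memory):
+//
+//	reqs, err := c.ListAll(ctx, ListAllPersonalizationRequestsRequest{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(len(reqs))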
+func (a *consumerPersonalizationRequestsPreviewImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PersonalizationRequest](ctx, iterator) +} +func (a *consumerPersonalizationRequestsPreviewImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse + path := "/api/2.1preview/marketplace-consumer/personalization-requests" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllPersonalizationRequestsResponse) + return &listAllPersonalizationRequestsResponse, err +} + +// unexported type that holds implementations of just ConsumerProvidersPreview API methods +type consumerProvidersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *consumerProvidersPreviewImpl) BatchGet(ctx context.Context, request BatchGetProvidersRequest) (*BatchGetProvidersResponse, error) { + var batchGetProvidersResponse BatchGetProvidersResponse + path := "/api/2.1preview/marketplace-consumer/providers:batchGet" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &batchGetProvidersResponse) + return &batchGetProvidersResponse, err +} + +func (a *consumerProvidersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { + var getProviderResponse GetProviderResponse + path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/providers/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getProviderResponse) + return &getProviderResponse, err +} + +// List providers. +// +// List all providers in the Databricks Marketplace with at least one visible +// listing. +func (a *consumerProvidersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// List all providers in the Databricks Marketplace with at least one visible +// listing. 
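+//
+// Example (editorial sketch, not generator output): ListAll is shorthand for
+// draining the List iterator with listing.ToSlice, exactly as the
+// implementation below does. `ctx` and the service handle `c` are assumed.
+//
+//	it := c.List(ctx, ListProvidersRequest{})
+//	providers, err := listing.ToSlice[ProviderInfo](ctx, it)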
+func (a *consumerProvidersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) {
+	iterator := a.List(ctx, request)
+	return listing.ToSlice[ProviderInfo](ctx, iterator)
+}
+func (a *consumerProvidersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) {
+	var listProvidersResponse ListProvidersResponse
+	path := "/api/2.1preview/marketplace-consumer/providers"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse)
+	return &listProvidersResponse, err
+}
+
+// unexported type that holds implementations of just ProviderExchangeFiltersPreview API methods
+type providerExchangeFiltersPreviewImpl struct {
+	client *client.DatabricksClient
+}
+
+func (a *providerExchangeFiltersPreviewImpl) Create(ctx context.Context, request CreateExchangeFilterRequest) (*CreateExchangeFilterResponse, error) {
+	var createExchangeFilterResponse CreateExchangeFilterResponse
+	path := "/api/2.0preview/marketplace-exchange/filters"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExchangeFilterResponse)
+	return &createExchangeFilterResponse, err
+}
+
+func (a *providerExchangeFiltersPreviewImpl) Delete(ctx context.Context, request DeleteExchangeFilterRequest) error {
+	var deleteExchangeFilterResponse DeleteExchangeFilterResponse
+	path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/filters/%v", request.Id)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteExchangeFilterResponse)
+	return err
+}
+
+// List exchange filters.
+//
+// List exchange filters
+func (a *providerExchangeFiltersPreviewImpl) List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] {
+
+	getNextPage := func(ctx context.Context, req ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) {
+		ctx = useragent.InContext(ctx, "sdk-feature", "pagination")
+		return a.internalList(ctx, req)
+	}
+	getItems := func(resp *ListExchangeFiltersResponse) []ExchangeFilter {
+		return resp.Filters
+	}
+	getNextReq := func(resp *ListExchangeFiltersResponse) *ListExchangeFiltersRequest {
+		if resp.NextPageToken == "" {
+			return nil
+		}
+		request.PageToken = resp.NextPageToken
+		return &request
+	}
+	iterator := listing.NewIterator(
+		&request,
+		getNextPage,
+		getItems,
+		getNextReq)
+	return iterator
+}
+
+// List exchange filters.
+//
+// List exchange filters
+func (a *providerExchangeFiltersPreviewImpl) ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) {
+	iterator := a.List(ctx, request)
+	return listing.ToSlice[ExchangeFilter](ctx, iterator)
+}
+func (a *providerExchangeFiltersPreviewImpl) internalList(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) {
+	var listExchangeFiltersResponse ListExchangeFiltersResponse
+	path := "/api/2.0preview/marketplace-exchange/filters"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangeFiltersResponse)
+	return &listExchangeFiltersResponse, err
+}
+
+func (a *providerExchangeFiltersPreviewImpl) Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) {
+	var updateExchangeFilterResponse UpdateExchangeFilterResponse
+	path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/filters/%v", request.Id)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateExchangeFilterResponse)
+	return &updateExchangeFilterResponse, err
+}
+
+// unexported type that holds implementations of just ProviderExchangesPreview API methods
+type providerExchangesPreviewImpl struct {
+	client *client.DatabricksClient
+}
+
+func (a *providerExchangesPreviewImpl) AddListingToExchange(ctx context.Context, request AddExchangeForListingRequest) (*AddExchangeForListingResponse, error) {
+	var addExchangeForListingResponse AddExchangeForListingResponse
+	path := "/api/2.0preview/marketplace-exchange/exchanges-for-listing"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addExchangeForListingResponse)
+	return &addExchangeForListingResponse, err
+}
+
+func (a *providerExchangesPreviewImpl) Create(ctx context.Context, request CreateExchangeRequest) (*CreateExchangeResponse, error) {
+	var createExchangeResponse CreateExchangeResponse
+	path := "/api/2.0preview/marketplace-exchange/exchanges"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExchangeResponse)
+	return &createExchangeResponse, err
+}
+
+func (a *providerExchangesPreviewImpl) Delete(ctx context.Context, request DeleteExchangeRequest) error {
+	var deleteExchangeResponse DeleteExchangeResponse
+	path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteExchangeResponse)
+	return err
+}
+
+func (a *providerExchangesPreviewImpl) DeleteListingFromExchange(ctx context.Context, request RemoveExchangeForListingRequest) error {
+	var removeExchangeForListingResponse RemoveExchangeForListingResponse
+	path :=
fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges-for-listing/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &removeExchangeForListingResponse) + return err +} + +func (a *providerExchangesPreviewImpl) Get(ctx context.Context, request GetExchangeRequest) (*GetExchangeResponse, error) { + var getExchangeResponse GetExchangeResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExchangeResponse) + return &getExchangeResponse, err +} + +// List exchanges. +// +// List exchanges visible to provider +func (a *providerExchangesPreviewImpl) List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] { + + getNextPage := func(ctx context.Context, req ListExchangesRequest) (*ListExchangesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListExchangesResponse) []Exchange { + return resp.Exchanges + } + getNextReq := func(resp *ListExchangesResponse) *ListExchangesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List exchanges. +// +// List exchanges visible to provider +func (a *providerExchangesPreviewImpl) ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Exchange](ctx, iterator) +} +func (a *providerExchangesPreviewImpl) internalList(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { + var listExchangesResponse ListExchangesResponse + path := "/api/2.0preview/marketplace-exchange/exchanges" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangesResponse) + return &listExchangesResponse, err +} + +// List exchanges for listing. +// +// List exchanges associated with a listing +func (a *providerExchangesPreviewImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] { + + getNextPage := func(ctx context.Context, req ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListExchangesForListing(ctx, req) + } + getItems := func(resp *ListExchangesForListingResponse) []ExchangeListing { + return resp.ExchangeListing + } + getNextReq := func(resp *ListExchangesForListingResponse) *ListExchangesForListingRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List exchanges for listing. 
+// +// List exchanges associated with a listing +func (a *providerExchangesPreviewImpl) ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) { + iterator := a.ListExchangesForListing(ctx, request) + return listing.ToSlice[ExchangeListing](ctx, iterator) +} +func (a *providerExchangesPreviewImpl) internalListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { + var listExchangesForListingResponse ListExchangesForListingResponse + path := "/api/2.0preview/marketplace-exchange/exchanges-for-listing" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangesForListingResponse) + return &listExchangesForListingResponse, err +} + +// List listings for exchange. +// +// List listings associated with an exchange +func (a *providerExchangesPreviewImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] { + + getNextPage := func(ctx context.Context, req ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListListingsForExchange(ctx, req) + } + getItems := func(resp *ListListingsForExchangeResponse) []ExchangeListing { + return resp.ExchangeListings + } + getNextReq := func(resp *ListListingsForExchangeResponse) *ListListingsForExchangeRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings for exchange. 
+// +// List listings associated with an exchange +func (a *providerExchangesPreviewImpl) ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) { + iterator := a.ListListingsForExchange(ctx, request) + return listing.ToSlice[ExchangeListing](ctx, iterator) +} +func (a *providerExchangesPreviewImpl) internalListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { + var listListingsForExchangeResponse ListListingsForExchangeResponse + path := "/api/2.0preview/marketplace-exchange/listings-for-exchange" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listListingsForExchangeResponse) + return &listListingsForExchangeResponse, err +} + +func (a *providerExchangesPreviewImpl) Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) { + var updateExchangeResponse UpdateExchangeResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateExchangeResponse) + return &updateExchangeResponse, err +} + +// unexported type that holds implementations of just ProviderFilesPreview API methods +type providerFilesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *providerFilesPreviewImpl) Create(ctx context.Context, request CreateFileRequest) (*CreateFileResponse, error) { + var createFileResponse CreateFileResponse + path := "/api/2.0preview/marketplace-provider/files" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createFileResponse) + return &createFileResponse, err +} + +func (a *providerFilesPreviewImpl) Delete(ctx context.Context, request DeleteFileRequest) error { + var deleteFileResponse DeleteFileResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/files/%v", request.FileId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteFileResponse) + return err +} + +func (a *providerFilesPreviewImpl) Get(ctx context.Context, request GetFileRequest) (*GetFileResponse, error) { + var getFileResponse GetFileResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/files/%v", request.FileId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getFileResponse) + return &getFileResponse, err +} + +// List files. +// +// List files attached to a parent entity. 
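+//
+// Example (editorial sketch, not generator output): files are listed under a
+// parent entity, so the request carries a FileParent. `ctx` and the service
+// handle `c` are assumed; "<listing-id>" stands in for a real listing ID.
+//
+//	it := c.List(ctx, ListFilesRequest{
+//		FileParent: FileParent{
+//			FileParentType: FileParentTypeListing,
+//			ParentId:       "<listing-id>",
+//		},
+//	})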
+func (a *providerFilesPreviewImpl) List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListFilesRequest) (*ListFilesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFilesResponse) []FileInfo { + return resp.FileInfos + } + getNextReq := func(resp *ListFilesResponse) *ListFilesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List files. +// +// List files attached to a parent entity. +func (a *providerFilesPreviewImpl) ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} +func (a *providerFilesPreviewImpl) internalList(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { + var listFilesResponse ListFilesResponse + path := "/api/2.0preview/marketplace-provider/files" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFilesResponse) + return &listFilesResponse, err +} + +// unexported type that holds implementations of just ProviderListingsPreview API methods +type providerListingsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *providerListingsPreviewImpl) Create(ctx context.Context, request CreateListingRequest) (*CreateListingResponse, error) { + var createListingResponse CreateListingResponse + path := "/api/2.0preview/marketplace-provider/listing" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createListingResponse) + return &createListingResponse, err +} + +func (a *providerListingsPreviewImpl) Delete(ctx context.Context, request DeleteListingRequest) error { + var deleteListingResponse DeleteListingResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteListingResponse) + return err +} + +func (a *providerListingsPreviewImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { + var getListingResponse GetListingResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingResponse) + return &getListingResponse, err +} + +// List listings. 
+// +// List listings owned by this provider +func (a *providerListingsPreviewImpl) List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req GetListingsRequest) (*GetListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *GetListingsResponse) *GetListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings. +// +// List listings owned by this provider +func (a *providerListingsPreviewImpl) ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} +func (a *providerListingsPreviewImpl) internalList(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { + var getListingsResponse GetListingsResponse + path := "/api/2.0preview/marketplace-provider/listings" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingsResponse) + return &getListingsResponse, err +} + +func (a *providerListingsPreviewImpl) Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) { + var updateListingResponse UpdateListingResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateListingResponse) + return &updateListingResponse, err +} + +// unexported type that holds implementations of just ProviderPersonalizationRequestsPreview API methods +type providerPersonalizationRequestsPreviewImpl struct { + client *client.DatabricksClient +} + +// All personalization requests across all listings. +// +// List personalization requests to this provider. This will return all +// personalization requests, regardless of which listing they are for. +func (a *providerPersonalizationRequestsPreviewImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { + + getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { + return resp.PersonalizationRequests + } + getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// All personalization requests across all listings. +// +// List personalization requests to this provider. 
This will return all +// personalization requests, regardless of which listing they are for. +func (a *providerPersonalizationRequestsPreviewImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PersonalizationRequest](ctx, iterator) +} +func (a *providerPersonalizationRequestsPreviewImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse + path := "/api/2.0preview/marketplace-provider/personalization-requests" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllPersonalizationRequestsResponse) + return &listAllPersonalizationRequestsResponse, err +} + +func (a *providerPersonalizationRequestsPreviewImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { + var updatePersonalizationRequestResponse UpdatePersonalizationRequestResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v/personalization-requests/%v/request-status", request.ListingId, request.RequestId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updatePersonalizationRequestResponse) + return &updatePersonalizationRequestResponse, err +} + +// unexported type that holds implementations of just ProviderProviderAnalyticsDashboardsPreview API methods +type providerProviderAnalyticsDashboardsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *providerProviderAnalyticsDashboardsPreviewImpl) Create(ctx context.Context) (*ProviderAnalyticsDashboard, error) { + var providerAnalyticsDashboard ProviderAnalyticsDashboard + path := "/api/2.0preview/marketplace-provider/analytics_dashboard" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, nil, nil, &providerAnalyticsDashboard) + return &providerAnalyticsDashboard, err +} + +func (a *providerProviderAnalyticsDashboardsPreviewImpl) Get(ctx context.Context) (*ListProviderAnalyticsDashboardResponse, error) { + var listProviderAnalyticsDashboardResponse ListProviderAnalyticsDashboardResponse + path := "/api/2.0preview/marketplace-provider/analytics_dashboard" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listProviderAnalyticsDashboardResponse) + return &listProviderAnalyticsDashboardResponse, err +} + +func (a *providerProviderAnalyticsDashboardsPreviewImpl) GetLatestVersion(ctx context.Context) (*GetLatestVersionProviderAnalyticsDashboardResponse, error) { + var getLatestVersionProviderAnalyticsDashboardResponse GetLatestVersionProviderAnalyticsDashboardResponse + path := "/api/2.0preview/marketplace-provider/analytics_dashboard/latest" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getLatestVersionProviderAnalyticsDashboardResponse) + return 
&getLatestVersionProviderAnalyticsDashboardResponse, err +} + +func (a *providerProviderAnalyticsDashboardsPreviewImpl) Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) { + var updateProviderAnalyticsDashboardResponse UpdateProviderAnalyticsDashboardResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/analytics_dashboard/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateProviderAnalyticsDashboardResponse) + return &updateProviderAnalyticsDashboardResponse, err +} + +// unexported type that holds implementations of just ProviderProvidersPreview API methods +type providerProvidersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *providerProvidersPreviewImpl) Create(ctx context.Context, request CreateProviderRequest) (*CreateProviderResponse, error) { + var createProviderResponse CreateProviderResponse + path := "/api/2.0preview/marketplace-provider/provider" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createProviderResponse) + return &createProviderResponse, err +} + +func (a *providerProvidersPreviewImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { + var deleteProviderResponse DeleteProviderResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteProviderResponse) + return err +} + +func (a *providerProvidersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { + var getProviderResponse GetProviderResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getProviderResponse) + return &getProviderResponse, err +} + +// List providers. +// +// List provider profiles for account. +func (a *providerProvidersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// List provider profiles for account. 
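+//
+// Example (editorial sketch, not generator output): when only a few items
+// are needed, prefer the List iterator over ListAll; pages are fetched
+// lazily, so breaking out early skips requests for pages that would never
+// be consumed. `ctx` and the service handle `c` are assumed.
+//
+//	it := c.List(ctx, ListProvidersRequest{})
+//	for it.HasNext(ctx) {
+//		p, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		_ = p // first ProviderInfo is enough here
+//		break
+//	}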
+func (a *providerProvidersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ProviderInfo](ctx, iterator) +} +func (a *providerProvidersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { + var listProvidersResponse ListProvidersResponse + path := "/api/2.0preview/marketplace-provider/providers" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse) + return &listProvidersResponse, err +} + +func (a *providerProvidersPreviewImpl) Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) { + var updateProviderResponse UpdateProviderResponse + path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateProviderResponse) + return &updateProviderResponse, err +} diff --git a/marketplace/v2preview/model.go b/marketplace/v2preview/model.go new file mode 100755 index 000000000..86731f5dd --- /dev/null +++ b/marketplace/v2preview/model.go @@ -0,0 +1,2074 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package marketplacepreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type AddExchangeForListingRequest struct { + ExchangeId string `json:"exchange_id"` + + ListingId string `json:"listing_id"` +} + +type AddExchangeForListingResponse struct { + ExchangeForListing *ExchangeListing `json:"exchange_for_listing,omitempty"` +} + +type AssetType string + +const AssetTypeAssetTypeDataTable AssetType = `ASSET_TYPE_DATA_TABLE` + +const AssetTypeAssetTypeGitRepo AssetType = `ASSET_TYPE_GIT_REPO` + +const AssetTypeAssetTypeMedia AssetType = `ASSET_TYPE_MEDIA` + +const AssetTypeAssetTypeModel AssetType = `ASSET_TYPE_MODEL` + +const AssetTypeAssetTypeNotebook AssetType = `ASSET_TYPE_NOTEBOOK` + +const AssetTypeAssetTypePartnerIntegration AssetType = `ASSET_TYPE_PARTNER_INTEGRATION` + +// String representation for [fmt.Print] +func (f *AssetType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AssetType) Set(v string) error { + switch v { + case `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`, `ASSET_TYPE_PARTNER_INTEGRATION`: + *f = AssetType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK", "ASSET_TYPE_PARTNER_INTEGRATION"`, v) + } +} + +// Type always returns AssetType to satisfy [pflag.Value] interface +func (f *AssetType) Type() string { + return "AssetType" +} + +// Get one batch of listings. One may specify up to 50 IDs per request. +type BatchGetListingsRequest struct { + Ids []string `json:"-" url:"ids,omitempty"` +} + +type BatchGetListingsResponse struct { + Listings []Listing `json:"listings,omitempty"` +} + +// Get one batch of providers. One may specify up to 50 IDs per request. 
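+//
+// Example (editorial sketch, not generator output): Ids is serialized into
+// the query string, so a batched lookup is a single GET. `ctx`, the service
+// handle `c`, and the `ids` slice (at most 50 entries) are assumed.
+//
+//	resp, err := c.BatchGet(ctx, BatchGetProvidersRequest{Ids: ids})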
+type BatchGetProvidersRequest struct { + Ids []string `json:"-" url:"ids,omitempty"` +} + +type BatchGetProvidersResponse struct { + Providers []ProviderInfo `json:"providers,omitempty"` +} + +type Category string + +const CategoryAdvertisingAndMarketing Category = `ADVERTISING_AND_MARKETING` + +const CategoryClimateAndEnvironment Category = `CLIMATE_AND_ENVIRONMENT` + +const CategoryCommerce Category = `COMMERCE` + +const CategoryDemographics Category = `DEMOGRAPHICS` + +const CategoryEconomics Category = `ECONOMICS` + +const CategoryEducation Category = `EDUCATION` + +const CategoryEnergy Category = `ENERGY` + +const CategoryFinancial Category = `FINANCIAL` + +const CategoryGaming Category = `GAMING` + +const CategoryGeospatial Category = `GEOSPATIAL` + +const CategoryHealth Category = `HEALTH` + +const CategoryLookupTables Category = `LOOKUP_TABLES` + +const CategoryManufacturing Category = `MANUFACTURING` + +const CategoryMedia Category = `MEDIA` + +const CategoryOther Category = `OTHER` + +const CategoryPublicSector Category = `PUBLIC_SECTOR` + +const CategoryRetail Category = `RETAIL` + +const CategoryScienceAndResearch Category = `SCIENCE_AND_RESEARCH` + +const CategorySecurity Category = `SECURITY` + +const CategorySports Category = `SPORTS` + +const CategoryTransportationAndLogistics Category = `TRANSPORTATION_AND_LOGISTICS` + +const CategoryTravelAndTourism Category = `TRAVEL_AND_TOURISM` + +// String representation for [fmt.Print] +func (f *Category) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Category) Set(v string) error { + switch v { + case `ADVERTISING_AND_MARKETING`, `CLIMATE_AND_ENVIRONMENT`, `COMMERCE`, `DEMOGRAPHICS`, `ECONOMICS`, `EDUCATION`, `ENERGY`, `FINANCIAL`, `GAMING`, `GEOSPATIAL`, `HEALTH`, `LOOKUP_TABLES`, `MANUFACTURING`, `MEDIA`, `OTHER`, `PUBLIC_SECTOR`, `RETAIL`, `SCIENCE_AND_RESEARCH`, `SECURITY`, `SPORTS`, `TRANSPORTATION_AND_LOGISTICS`, `TRAVEL_AND_TOURISM`: + *f = Category(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ADVERTISING_AND_MARKETING", "CLIMATE_AND_ENVIRONMENT", "COMMERCE", "DEMOGRAPHICS", "ECONOMICS", "EDUCATION", "ENERGY", "FINANCIAL", "GAMING", "GEOSPATIAL", "HEALTH", "LOOKUP_TABLES", "MANUFACTURING", "MEDIA", "OTHER", "PUBLIC_SECTOR", "RETAIL", "SCIENCE_AND_RESEARCH", "SECURITY", "SPORTS", "TRANSPORTATION_AND_LOGISTICS", "TRAVEL_AND_TOURISM"`, v) + } +} + +// Type always returns Category to satisfy [pflag.Value] interface +func (f *Category) Type() string { + return "Category" +} + +type ConsumerTerms struct { + Version string `json:"version"` +} + +// contact info for the consumer requesting data or performing a listing +// installation +type ContactInfo struct { + Company string `json:"company,omitempty"` + + Email string `json:"email,omitempty"` + + FirstName string `json:"first_name,omitempty"` + + LastName string `json:"last_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ContactInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ContactInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Cost string + +const CostFree Cost = `FREE` + +const CostPaid Cost = `PAID` + +// String representation for [fmt.Print] +func (f *Cost) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Cost) Set(v string) error { + switch v { + case `FREE`, `PAID`: + *f = Cost(v) + return nil + default: + return 
fmt.Errorf(`value "%s" is not one of "FREE", "PAID"`, v)
+	}
+}
+
+// Type always returns Cost to satisfy [pflag.Value] interface
+func (f *Cost) Type() string {
+	return "Cost"
+}
+
+type CreateExchangeFilterRequest struct {
+	Filter ExchangeFilter `json:"filter"`
+}
+
+type CreateExchangeFilterResponse struct {
+	FilterId string `json:"filter_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateExchangeFilterResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateExchangeFilterResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateExchangeRequest struct {
+	Exchange Exchange `json:"exchange"`
+}
+
+type CreateExchangeResponse struct {
+	ExchangeId string `json:"exchange_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateExchangeResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateExchangeResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateFileRequest struct {
+	DisplayName string `json:"display_name,omitempty"`
+
+	FileParent FileParent `json:"file_parent"`
+
+	MarketplaceFileType MarketplaceFileType `json:"marketplace_file_type"`
+
+	MimeType string `json:"mime_type"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateFileRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateFileRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateFileResponse struct {
+	FileInfo *FileInfo `json:"file_info,omitempty"`
+	// Pre-signed POST URL to blob storage
+	UploadUrl string `json:"upload_url,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateFileResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateFileResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateInstallationRequest struct {
+	AcceptedConsumerTerms *ConsumerTerms `json:"accepted_consumer_terms,omitempty"`
+
+	CatalogName string `json:"catalog_name,omitempty"`
+
+	ListingId string `json:"-" url:"-"`
+
+	RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"`
+	// for git repo installations
+	RepoDetail *RepoInstallation `json:"repo_detail,omitempty"`
+
+	ShareName string `json:"share_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateInstallationRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateInstallationRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateListingRequest struct {
+	Listing Listing `json:"listing"`
+}
+
+type CreateListingResponse struct {
+	ListingId string `json:"listing_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateListingResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateListingResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Data request messages may also create a lead
+type CreatePersonalizationRequest struct {
+	AcceptedConsumerTerms ConsumerTerms `json:"accepted_consumer_terms"`
+
+	Comment string `json:"comment,omitempty"`
+
+	Company string `json:"company,omitempty"`
+
+	FirstName string `json:"first_name,omitempty"`
+
+	IntendedUse string `json:"intended_use"`
+
+	IsFromLighthouse bool `json:"is_from_lighthouse,omitempty"`
+
+	LastName string `json:"last_name,omitempty"`
+
+	ListingId string `json:"-" url:"-"`
+
+	
RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePersonalizationRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePersonalizationRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePersonalizationRequestResponse struct { + Id string `json:"id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePersonalizationRequestResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePersonalizationRequestResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateProviderRequest struct { + Provider ProviderInfo `json:"provider"` +} + +type CreateProviderResponse struct { + Id string `json:"id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateProviderResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateProviderResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DataRefresh string + +const DataRefreshDaily DataRefresh = `DAILY` + +const DataRefreshHourly DataRefresh = `HOURLY` + +const DataRefreshMinute DataRefresh = `MINUTE` + +const DataRefreshMonthly DataRefresh = `MONTHLY` + +const DataRefreshNone DataRefresh = `NONE` + +const DataRefreshQuarterly DataRefresh = `QUARTERLY` + +const DataRefreshSecond DataRefresh = `SECOND` + +const DataRefreshWeekly DataRefresh = `WEEKLY` + +const DataRefreshYearly DataRefresh = `YEARLY` + +// String representation for [fmt.Print] +func (f *DataRefresh) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataRefresh) Set(v string) error { + switch v { + case `DAILY`, `HOURLY`, `MINUTE`, `MONTHLY`, `NONE`, `QUARTERLY`, `SECOND`, `WEEKLY`, `YEARLY`: + *f = DataRefresh(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAILY", "HOURLY", "MINUTE", "MONTHLY", "NONE", "QUARTERLY", "SECOND", "WEEKLY", "YEARLY"`, v) + } +} + +// Type always returns DataRefresh to satisfy [pflag.Value] interface +func (f *DataRefresh) Type() string { + return "DataRefresh" +} + +type DataRefreshInfo struct { + Interval int64 `json:"interval"` + + Unit DataRefresh `json:"unit"` +} + +// Delete an exchange filter +type DeleteExchangeFilterRequest struct { + Id string `json:"-" url:"-"` +} + +type DeleteExchangeFilterResponse struct { +} + +// Delete an exchange +type DeleteExchangeRequest struct { + Id string `json:"-" url:"-"` +} + +type DeleteExchangeResponse struct { +} + +// Delete a file +type DeleteFileRequest struct { + FileId string `json:"-" url:"-"` +} + +type DeleteFileResponse struct { +} + +// Uninstall from a listing +type DeleteInstallationRequest struct { + InstallationId string `json:"-" url:"-"` + + ListingId string `json:"-" url:"-"` +} + +type DeleteInstallationResponse struct { +} + +// Delete a listing +type DeleteListingRequest struct { + Id string `json:"-" url:"-"` +} + +type DeleteListingResponse struct { +} + +// Delete provider +type DeleteProviderRequest struct { + Id string `json:"-" url:"-"` +} + +type DeleteProviderResponse struct { +} + +type DeltaSharingRecipientType string + +const DeltaSharingRecipientTypeDeltaSharingRecipientTypeDatabricks DeltaSharingRecipientType = `DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS` + +const DeltaSharingRecipientTypeDeltaSharingRecipientTypeOpen DeltaSharingRecipientType = `DELTA_SHARING_RECIPIENT_TYPE_OPEN` 
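+
+// Editorial note (not generator output): like the other enum types in this
+// package, DeltaSharingRecipientType validates assignments through Set; for
+// example:
+//
+//	var rt DeltaSharingRecipientType
+//	if err := rt.Set("OPEN"); err != nil {
+//		// rejected: only the two DELTA_SHARING_RECIPIENT_TYPE_* values parse
+//	}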
+ +// String representation for [fmt.Print] +func (f *DeltaSharingRecipientType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeltaSharingRecipientType) Set(v string) error { + switch v { + case `DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS`, `DELTA_SHARING_RECIPIENT_TYPE_OPEN`: + *f = DeltaSharingRecipientType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS", "DELTA_SHARING_RECIPIENT_TYPE_OPEN"`, v) + } +} + +// Type always returns DeltaSharingRecipientType to satisfy [pflag.Value] interface +func (f *DeltaSharingRecipientType) Type() string { + return "DeltaSharingRecipientType" +} + +type Exchange struct { + Comment string `json:"comment,omitempty"` + + CreatedAt int64 `json:"created_at,omitempty"` + + CreatedBy string `json:"created_by,omitempty"` + + Filters []ExchangeFilter `json:"filters,omitempty"` + + Id string `json:"id,omitempty"` + + LinkedListings []ExchangeListing `json:"linked_listings,omitempty"` + + Name string `json:"name"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Exchange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Exchange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExchangeFilter struct { + CreatedAt int64 `json:"created_at,omitempty"` + + CreatedBy string `json:"created_by,omitempty"` + + ExchangeId string `json:"exchange_id"` + + FilterType ExchangeFilterType `json:"filter_type"` + + FilterValue string `json:"filter_value"` + + Id string `json:"id,omitempty"` + + Name string `json:"name,omitempty"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExchangeFilter) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExchangeFilter) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExchangeFilterType string + +const ExchangeFilterTypeGlobalMetastoreId ExchangeFilterType = `GLOBAL_METASTORE_ID` + +// String representation for [fmt.Print] +func (f *ExchangeFilterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExchangeFilterType) Set(v string) error { + switch v { + case `GLOBAL_METASTORE_ID`: + *f = ExchangeFilterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GLOBAL_METASTORE_ID"`, v) + } +} + +// Type always returns ExchangeFilterType to satisfy [pflag.Value] interface +func (f *ExchangeFilterType) Type() string { + return "ExchangeFilterType" +} + +type ExchangeListing struct { + CreatedAt int64 `json:"created_at,omitempty"` + + CreatedBy string `json:"created_by,omitempty"` + + ExchangeId string `json:"exchange_id,omitempty"` + + ExchangeName string `json:"exchange_name,omitempty"` + + Id string `json:"id,omitempty"` + + ListingId string `json:"listing_id,omitempty"` + + ListingName string `json:"listing_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExchangeListing) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExchangeListing) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FileInfo struct { + CreatedAt int64 `json:"created_at,omitempty"` + // Name displayed to users for applicable files, e.g. 
embedded notebooks + DisplayName string `json:"display_name,omitempty"` + + DownloadLink string `json:"download_link,omitempty"` + + FileParent *FileParent `json:"file_parent,omitempty"` + + Id string `json:"id,omitempty"` + + MarketplaceFileType MarketplaceFileType `json:"marketplace_file_type,omitempty"` + + MimeType string `json:"mime_type,omitempty"` + + Status FileStatus `json:"status,omitempty"` + // Populated if status is in a failed state with more information on reason + // for the failure. + StatusMessage string `json:"status_message,omitempty"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FileInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FileInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FileParent struct { + FileParentType FileParentType `json:"file_parent_type,omitempty"` + // TODO make the following fields required + ParentId string `json:"parent_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FileParent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FileParent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FileParentType string + +const FileParentTypeListing FileParentType = `LISTING` + +const FileParentTypeProvider FileParentType = `PROVIDER` + +// String representation for [fmt.Print] +func (f *FileParentType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FileParentType) Set(v string) error { + switch v { + case `LISTING`, `PROVIDER`: + *f = FileParentType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LISTING", "PROVIDER"`, v) + } +} + +// Type always returns FileParentType to satisfy [pflag.Value] interface +func (f *FileParentType) Type() string { + return "FileParentType" +} + +type FileStatus string + +const FileStatusFileStatusPublished FileStatus = `FILE_STATUS_PUBLISHED` + +const FileStatusFileStatusSanitizationFailed FileStatus = `FILE_STATUS_SANITIZATION_FAILED` + +const FileStatusFileStatusSanitizing FileStatus = `FILE_STATUS_SANITIZING` + +const FileStatusFileStatusStaging FileStatus = `FILE_STATUS_STAGING` + +// String representation for [fmt.Print] +func (f *FileStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FileStatus) Set(v string) error { + switch v { + case `FILE_STATUS_PUBLISHED`, `FILE_STATUS_SANITIZATION_FAILED`, `FILE_STATUS_SANITIZING`, `FILE_STATUS_STAGING`: + *f = FileStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FILE_STATUS_PUBLISHED", "FILE_STATUS_SANITIZATION_FAILED", "FILE_STATUS_SANITIZING", "FILE_STATUS_STAGING"`, v) + } +} + +// Type always returns FileStatus to satisfy [pflag.Value] interface +func (f *FileStatus) Type() string { + return "FileStatus" +} + +type FulfillmentType string + +const FulfillmentTypeInstall FulfillmentType = `INSTALL` + +const FulfillmentTypeRequestAccess FulfillmentType = `REQUEST_ACCESS` + +// String representation for [fmt.Print] +func (f *FulfillmentType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FulfillmentType) Set(v string) error { + switch v { + case `INSTALL`, `REQUEST_ACCESS`: + *f = FulfillmentType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INSTALL", "REQUEST_ACCESS"`, v) + } +} + +// 
Type always returns FulfillmentType to satisfy [pflag.Value] interface +func (f *FulfillmentType) Type() string { + return "FulfillmentType" +} + +// Get an exchange +type GetExchangeRequest struct { + Id string `json:"-" url:"-"` +} + +type GetExchangeResponse struct { + Exchange *Exchange `json:"exchange,omitempty"` +} + +// Get a file +type GetFileRequest struct { + FileId string `json:"-" url:"-"` +} + +type GetFileResponse struct { + FileInfo *FileInfo `json:"file_info,omitempty"` +} + +type GetLatestVersionProviderAnalyticsDashboardResponse struct { + // version here is latest logical version of the dashboard template + Version int64 `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetLatestVersionProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetLatestVersionProviderAnalyticsDashboardResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get listing content metadata +type GetListingContentMetadataRequest struct { + ListingId string `json:"-" url:"-"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetListingContentMetadataRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetListingContentMetadataRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetListingContentMetadataResponse struct { + NextPageToken string `json:"next_page_token,omitempty"` + + SharedDataObjects []SharedDataObject `json:"shared_data_objects,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetListingContentMetadataResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetListingContentMetadataResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get listing +type GetListingRequest struct { + Id string `json:"-" url:"-"` +} + +type GetListingResponse struct { + Listing *Listing `json:"listing,omitempty"` +} + +// List listings +type GetListingsRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetListingsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetListingsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetListingsResponse struct { + Listings []Listing `json:"listings,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetListingsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetListingsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the personalization request for a listing +type GetPersonalizationRequestRequest struct { + ListingId string `json:"-" url:"-"` +} + +type GetPersonalizationRequestResponse struct { + PersonalizationRequests []PersonalizationRequest `json:"personalization_requests,omitempty"` +} + +// Get a provider +type GetProviderRequest struct { + Id string `json:"-" url:"-"` +} + +type GetProviderResponse struct { + Provider *ProviderInfo `json:"provider,omitempty"` +} + +type Installation struct { + Installation *InstallationDetail `json:"installation,omitempty"` +} + +type InstallationDetail struct { + CatalogName string `json:"catalog_name,omitempty"` + + 
ErrorMessage string `json:"error_message,omitempty"` + + Id string `json:"id,omitempty"` + + InstalledOn int64 `json:"installed_on,omitempty"` + + ListingId string `json:"listing_id,omitempty"` + + ListingName string `json:"listing_name,omitempty"` + + RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"` + + RepoName string `json:"repo_name,omitempty"` + + RepoPath string `json:"repo_path,omitempty"` + + ShareName string `json:"share_name,omitempty"` + + Status InstallationStatus `json:"status,omitempty"` + + TokenDetail *TokenDetail `json:"token_detail,omitempty"` + + Tokens []TokenInfo `json:"tokens,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *InstallationDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s InstallationDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type InstallationStatus string + +const InstallationStatusFailed InstallationStatus = `FAILED` + +const InstallationStatusInstalled InstallationStatus = `INSTALLED` + +// String representation for [fmt.Print] +func (f *InstallationStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstallationStatus) Set(v string) error { + switch v { + case `FAILED`, `INSTALLED`: + *f = InstallationStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "INSTALLED"`, v) + } +} + +// Type always returns InstallationStatus to satisfy [pflag.Value] interface +func (f *InstallationStatus) Type() string { + return "InstallationStatus" +} + +// List all installations +type ListAllInstallationsRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAllInstallationsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAllInstallationsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAllInstallationsResponse struct { + Installations []InstallationDetail `json:"installations,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAllInstallationsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAllInstallationsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List all personalization requests +type ListAllPersonalizationRequestsRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAllPersonalizationRequestsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAllPersonalizationRequestsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAllPersonalizationRequestsResponse struct { + NextPageToken string `json:"next_page_token,omitempty"` + + PersonalizationRequests []PersonalizationRequest `json:"personalization_requests,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAllPersonalizationRequestsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAllPersonalizationRequestsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List exchange filters +type ListExchangeFiltersRequest struct { + ExchangeId string `json:"-" 
url:"exchange_id"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangeFiltersRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangeFiltersRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListExchangeFiltersResponse struct { + Filters []ExchangeFilter `json:"filters,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangeFiltersResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangeFiltersResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List exchanges for listing +type ListExchangesForListingRequest struct { + ListingId string `json:"-" url:"listing_id"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangesForListingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangesForListingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListExchangesForListingResponse struct { + ExchangeListing []ExchangeListing `json:"exchange_listing,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangesForListingResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangesForListingResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List exchanges +type ListExchangesRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListExchangesResponse struct { + Exchanges []Exchange `json:"exchanges,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListExchangesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListExchangesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List files +type ListFilesRequest struct { + FileParent FileParent `json:"-" url:"file_parent"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFilesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFilesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListFilesResponse struct { + FileInfos []FileInfo `json:"file_infos,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFilesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFilesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List all listing fulfillments +type ListFulfillmentsRequest struct { + ListingId string `json:"-" url:"-"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + 
PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFulfillmentsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFulfillmentsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListFulfillmentsResponse struct { + Fulfillments []ListingFulfillment `json:"fulfillments,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFulfillmentsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFulfillmentsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List installations for a listing +type ListInstallationsRequest struct { + ListingId string `json:"-" url:"-"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListInstallationsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListInstallationsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListInstallationsResponse struct { + Installations []InstallationDetail `json:"installations,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListInstallationsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListInstallationsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List listings for exchange +type ListListingsForExchangeRequest struct { + ExchangeId string `json:"-" url:"exchange_id"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListListingsForExchangeRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListListingsForExchangeRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListListingsForExchangeResponse struct { + ExchangeListings []ExchangeListing `json:"exchange_listings,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListListingsForExchangeResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListListingsForExchangeResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List listings +type ListListingsRequest struct { + // Matches any of the following asset types + Assets []AssetType `json:"-" url:"assets,omitempty"` + // Matches any of the following categories + Categories []Category `json:"-" url:"categories,omitempty"` + // Filters each listing based on if it is free. + IsFree bool `json:"-" url:"is_free,omitempty"` + // Filters each listing based on if it is a private exchange. + IsPrivateExchange bool `json:"-" url:"is_private_exchange,omitempty"` + // Filters each listing based on whether it is a staff pick. 
+ IsStaffPick bool `json:"-" url:"is_staff_pick,omitempty"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + // Matches any of the following provider ids + ProviderIds []string `json:"-" url:"provider_ids,omitempty"` + // Matches any of the following tags + Tags []ListingTag `json:"-" url:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListListingsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListListingsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListListingsResponse struct { + Listings []Listing `json:"listings,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListListingsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListListingsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListProviderAnalyticsDashboardResponse struct { + // dashboard_id will be used to open Lakeview dashboard. + DashboardId string `json:"dashboard_id"` + + Id string `json:"id"` + + Version int64 `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProviderAnalyticsDashboardResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List providers +type ListProvidersRequest struct { + IsFeatured bool `json:"-" url:"is_featured,omitempty"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProvidersRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProvidersRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListProvidersResponse struct { + NextPageToken string `json:"next_page_token,omitempty"` + + Providers []ProviderInfo `json:"providers,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProvidersResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProvidersResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Listing struct { + Detail *ListingDetail `json:"detail,omitempty"` + + Id string `json:"id,omitempty"` + // Next Number: 26 + Summary ListingSummary `json:"summary"` + + ForceSendFields []string `json:"-"` +} + +func (s *Listing) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Listing) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListingDetail struct { + // Type of assets included in the listing. eg. 
GIT_REPO, DATA_TABLE, MODEL, + // NOTEBOOK + Assets []AssetType `json:"assets,omitempty"` + // The ending date timestamp for when the data spans + CollectionDateEnd int64 `json:"collection_date_end,omitempty"` + // The starting date timestamp for when the data spans + CollectionDateStart int64 `json:"collection_date_start,omitempty"` + // Smallest unit of time in the dataset + CollectionGranularity *DataRefreshInfo `json:"collection_granularity,omitempty"` + // Whether the dataset is free or paid + Cost Cost `json:"cost,omitempty"` + // Where/how the data is sourced + DataSource string `json:"data_source,omitempty"` + + Description string `json:"description,omitempty"` + + DocumentationLink string `json:"documentation_link,omitempty"` + + EmbeddedNotebookFileInfos []FileInfo `json:"embedded_notebook_file_infos,omitempty"` + + FileIds []string `json:"file_ids,omitempty"` + // Which geo region the listing data is collected from + GeographicalCoverage string `json:"geographical_coverage,omitempty"` + // IDs 20, 21 removed; don't use. License of the data asset - Required for + // listings with model-based assets + License string `json:"license,omitempty"` + // What the pricing model is (e.g. paid, subscription, paid upfront); should + // only be present if cost is paid. TODO: not used yet; should deprecate if + // we will never use it + PricingModel string `json:"pricing_model,omitempty"` + + PrivacyPolicyLink string `json:"privacy_policy_link,omitempty"` + // size of the dataset in GB + Size float64 `json:"size,omitempty"` + + SupportLink string `json:"support_link,omitempty"` + // Listing tags - simple key-value pairs to annotate listings. When should I + // use tags vs dedicated fields? Using tags avoids the need to add new + // columns in the database for new annotations. However, this should be used + // sparingly since tags are stored as key-value pairs. Use tags only: 1. If + // the field is optional and won't need a NOT NULL integrity check 2. The + // value is fairly fixed, static and low cardinality (e.g. enums). 3. The + // value won't be used in filters or joins with other tables. 
+ Tags []ListingTag `json:"tags,omitempty"` + + TermsOfService string `json:"terms_of_service,omitempty"` + // How often data is updated + UpdateFrequency *DataRefreshInfo `json:"update_frequency,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListingDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListingDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListingFulfillment struct { + FulfillmentType FulfillmentType `json:"fulfillment_type,omitempty"` + + ListingId string `json:"listing_id"` + + RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"` + + RepoInfo *RepoInfo `json:"repo_info,omitempty"` + + ShareInfo *ShareInfo `json:"share_info,omitempty"` +} + +type ListingSetting struct { + Visibility Visibility `json:"visibility,omitempty"` +} + +type ListingShareType string + +const ListingShareTypeFull ListingShareType = `FULL` + +const ListingShareTypeSample ListingShareType = `SAMPLE` + +// String representation for [fmt.Print] +func (f *ListingShareType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingShareType) Set(v string) error { + switch v { + case `FULL`, `SAMPLE`: + *f = ListingShareType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FULL", "SAMPLE"`, v) + } +} + +// Type always returns ListingShareType to satisfy [pflag.Value] interface +func (f *ListingShareType) Type() string { + return "ListingShareType" +} + +// Enums +type ListingStatus string + +const ListingStatusDraft ListingStatus = `DRAFT` + +const ListingStatusPending ListingStatus = `PENDING` + +const ListingStatusPublished ListingStatus = `PUBLISHED` + +const ListingStatusSuspended ListingStatus = `SUSPENDED` + +// String representation for [fmt.Print] +func (f *ListingStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingStatus) Set(v string) error { + switch v { + case `DRAFT`, `PENDING`, `PUBLISHED`, `SUSPENDED`: + *f = ListingStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DRAFT", "PENDING", "PUBLISHED", "SUSPENDED"`, v) + } +} + +// Type always returns ListingStatus to satisfy [pflag.Value] interface +func (f *ListingStatus) Type() string { + return "ListingStatus" +} + +// Next Number: 26 +type ListingSummary struct { + Categories []Category `json:"categories,omitempty"` + + CreatedAt int64 `json:"created_at,omitempty"` + + CreatedBy string `json:"created_by,omitempty"` + + CreatedById int64 `json:"created_by_id,omitempty"` + + ExchangeIds []string `json:"exchange_ids,omitempty"` + // if a git repo is being created, a listing will be initialized with this + // field as opposed to a share + GitRepo *RepoInfo `json:"git_repo,omitempty"` + + ListingType ListingType `json:"listingType"` + + Name string `json:"name"` + + ProviderId string `json:"provider_id,omitempty"` + + ProviderRegion *RegionInfo `json:"provider_region,omitempty"` + + PublishedAt int64 `json:"published_at,omitempty"` + + PublishedBy string `json:"published_by,omitempty"` + + Setting *ListingSetting `json:"setting,omitempty"` + + Share *ShareInfo `json:"share,omitempty"` + // Enums + Status ListingStatus `json:"status,omitempty"` + + Subtitle string `json:"subtitle,omitempty"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + + UpdatedBy string `json:"updated_by,omitempty"` + + UpdatedById int64 `json:"updated_by_id,omitempty"` + 
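Every enum in this file carries the same three methods, which together satisfy the [pflag.Value] contract: `String` echoes the value, `Set` validates raw input against the allowed constants, and `Type` names the type. A small sketch of using `Set` (defined above for `ListingStatus`) to gate untrusted input instead of casting blindly:

// parseListingStatus routes raw input through Set so invalid values
// are rejected with the enum's own error message.
func parseListingStatus(raw string) (ListingStatus, error) {
	var status ListingStatus
	if err := status.Set(raw); err != nil {
		return "", err // e.g.: value "ARCHIVED" is not one of "DRAFT", ...
	}
	return status, nil
}

The same three methods are what let a pflag-based CLI bind these enums directly as flag values.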
+ ForceSendFields []string `json:"-"` +} + +func (s *ListingSummary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListingSummary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListingTag struct { + // Tag name (enum) + TagName ListingTagType `json:"tag_name,omitempty"` + // String representation of the tag value. Values should be string literals + // (no complex types) + TagValues []string `json:"tag_values,omitempty"` +} + +type ListingTagType string + +const ListingTagTypeListingTagTypeLanguage ListingTagType = `LISTING_TAG_TYPE_LANGUAGE` + +const ListingTagTypeListingTagTypeTask ListingTagType = `LISTING_TAG_TYPE_TASK` + +// String representation for [fmt.Print] +func (f *ListingTagType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingTagType) Set(v string) error { + switch v { + case `LISTING_TAG_TYPE_LANGUAGE`, `LISTING_TAG_TYPE_TASK`: + *f = ListingTagType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LISTING_TAG_TYPE_LANGUAGE", "LISTING_TAG_TYPE_TASK"`, v) + } +} + +// Type always returns ListingTagType to satisfy [pflag.Value] interface +func (f *ListingTagType) Type() string { + return "ListingTagType" +} + +type ListingType string + +const ListingTypePersonalized ListingType = `PERSONALIZED` + +const ListingTypeStandard ListingType = `STANDARD` + +// String representation for [fmt.Print] +func (f *ListingType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingType) Set(v string) error { + switch v { + case `PERSONALIZED`, `STANDARD`: + *f = ListingType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PERSONALIZED", "STANDARD"`, v) + } +} + +// Type always returns ListingType to satisfy [pflag.Value] interface +func (f *ListingType) Type() string { + return "ListingType" +} + +type MarketplaceFileType string + +const MarketplaceFileTypeEmbeddedNotebook MarketplaceFileType = `EMBEDDED_NOTEBOOK` + +const MarketplaceFileTypeProviderIcon MarketplaceFileType = `PROVIDER_ICON` + +// String representation for [fmt.Print] +func (f *MarketplaceFileType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MarketplaceFileType) Set(v string) error { + switch v { + case `EMBEDDED_NOTEBOOK`, `PROVIDER_ICON`: + *f = MarketplaceFileType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMBEDDED_NOTEBOOK", "PROVIDER_ICON"`, v) + } +} + +// Type always returns MarketplaceFileType to satisfy [pflag.Value] interface +func (f *MarketplaceFileType) Type() string { + return "MarketplaceFileType" +} + +type PersonalizationRequest struct { + Comment string `json:"comment,omitempty"` + + ConsumerRegion RegionInfo `json:"consumer_region"` + // contact info for the consumer requesting data or performing a listing + // installation + ContactInfo *ContactInfo `json:"contact_info,omitempty"` + + CreatedAt int64 `json:"created_at,omitempty"` + + Id string `json:"id,omitempty"` + + IntendedUse string `json:"intended_use,omitempty"` + + IsFromLighthouse bool `json:"is_from_lighthouse,omitempty"` + + ListingId string `json:"listing_id,omitempty"` + + ListingName string `json:"listing_name,omitempty"` + + MetastoreId string `json:"metastore_id,omitempty"` + + ProviderId string `json:"provider_id,omitempty"` + + RecipientType DeltaSharingRecipientType 
`json:"recipient_type,omitempty"` + + Share *ShareInfo `json:"share,omitempty"` + + Status PersonalizationRequestStatus `json:"status,omitempty"` + + StatusMessage string `json:"status_message,omitempty"` + + UpdatedAt int64 `json:"updated_at,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PersonalizationRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PersonalizationRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PersonalizationRequestStatus string + +const PersonalizationRequestStatusDenied PersonalizationRequestStatus = `DENIED` + +const PersonalizationRequestStatusFulfilled PersonalizationRequestStatus = `FULFILLED` + +const PersonalizationRequestStatusNew PersonalizationRequestStatus = `NEW` + +const PersonalizationRequestStatusRequestPending PersonalizationRequestStatus = `REQUEST_PENDING` + +// String representation for [fmt.Print] +func (f *PersonalizationRequestStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PersonalizationRequestStatus) Set(v string) error { + switch v { + case `DENIED`, `FULFILLED`, `NEW`, `REQUEST_PENDING`: + *f = PersonalizationRequestStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DENIED", "FULFILLED", "NEW", "REQUEST_PENDING"`, v) + } +} + +// Type always returns PersonalizationRequestStatus to satisfy [pflag.Value] interface +func (f *PersonalizationRequestStatus) Type() string { + return "PersonalizationRequestStatus" +} + +type ProviderAnalyticsDashboard struct { + Id string `json:"id"` +} + +type ProviderInfo struct { + BusinessContactEmail string `json:"business_contact_email"` + + CompanyWebsiteLink string `json:"company_website_link,omitempty"` + + DarkModeIconFileId string `json:"dark_mode_icon_file_id,omitempty"` + + DarkModeIconFilePath string `json:"dark_mode_icon_file_path,omitempty"` + + Description string `json:"description,omitempty"` + + IconFileId string `json:"icon_file_id,omitempty"` + + IconFilePath string `json:"icon_file_path,omitempty"` + + Id string `json:"id,omitempty"` + // is_featured is accessible by consumers only + IsFeatured bool `json:"is_featured,omitempty"` + + Name string `json:"name"` + + PrivacyPolicyLink string `json:"privacy_policy_link"` + // published_by is only applicable to data aggregators (e.g. Crux) + PublishedBy string `json:"published_by,omitempty"` + + SupportContactEmail string `json:"support_contact_email,omitempty"` + + TermOfServiceLink string `json:"term_of_service_link"` + + ForceSendFields []string `json:"-"` +} + +func (s *ProviderInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ProviderInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegionInfo struct { + Cloud string `json:"cloud,omitempty"` + + Region string `json:"region,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegionInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegionInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Remove an exchange for listing +type RemoveExchangeForListingRequest struct { + Id string `json:"-" url:"-"` +} + +type RemoveExchangeForListingResponse struct { +} + +type RepoInfo struct { + // the git repo url e.g. 
https://github.com/databrickslabs/dolly.git + GitRepoUrl string `json:"git_repo_url"` +} + +type RepoInstallation struct { + // the user-specified repo name for their installed git repo listing + RepoName string `json:"repo_name"` + // refers to the full url file path that navigates the user to the repo's + // entrypoint (e.g. a README.md file, or the repo file view in the unified + // UI) should just be a relative path + RepoPath string `json:"repo_path"` +} + +// Search listings +type SearchListingsRequest struct { + // Matches any of the following asset types + Assets []AssetType `json:"-" url:"assets,omitempty"` + // Matches any of the following categories + Categories []Category `json:"-" url:"categories,omitempty"` + + IsFree bool `json:"-" url:"is_free,omitempty"` + + IsPrivateExchange bool `json:"-" url:"is_private_exchange,omitempty"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + // Matches any of the following provider ids + ProviderIds []string `json:"-" url:"provider_ids,omitempty"` + // Fuzzy matches query + Query string `json:"-" url:"query"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchListingsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchListingsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchListingsResponse struct { + Listings []Listing `json:"listings,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchListingsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchListingsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ShareInfo struct { + Name string `json:"name"` + + Type ListingShareType `json:"type"` +} + +type SharedDataObject struct { + // The type of the data object. Could be one of: TABLE, SCHEMA, + // NOTEBOOK_FILE, MODEL, VOLUME + DataObjectType string `json:"data_object_type,omitempty"` + // Name of the shared object + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SharedDataObject) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SharedDataObject) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenDetail struct { + BearerToken string `json:"bearerToken,omitempty"` + + Endpoint string `json:"endpoint,omitempty"` + + ExpirationTime string `json:"expirationTime,omitempty"` + // These field names must follow the delta sharing protocol. Original + // message: RetrieveToken.Response in + // managed-catalog/api/messages/recipient.proto + ShareCredentialsVersion int `json:"shareCredentialsVersion,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenInfo struct { + // Full activation url to retrieve the access token. It will be empty if the + // token is already retrieved. + ActivationUrl string `json:"activation_url,omitempty"` + // Time at which this Recipient Token was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of Recipient Token creator. + CreatedBy string `json:"created_by,omitempty"` + // Expiration timestamp of the token in epoch milliseconds. 
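As the comment on `TokenDetail` notes, its camelCase JSON tags follow the Delta Sharing protocol rather than the snake_case used elsewhere in this package, so a raw credentials document decodes straight into it. A minimal sketch, assuming the standard library's encoding/json is imported:

// decodeDeltaSharingCredentials parses a protocol payload such as
// {"bearerToken":"<token>","endpoint":"https://sharing.example.com",
//  "expirationTime":"2025-01-01T00:00:00Z","shareCredentialsVersion":1}.
// The generated UnmarshalJSON delegates to the marshal package, so the
// plain json.Unmarshal entry point works as usual.
func decodeDeltaSharingCredentials(raw []byte) (*TokenDetail, error) {
	var detail TokenDetail
	if err := json.Unmarshal(raw, &detail); err != nil {
		return nil, err
	}
	return &detail, nil
}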
+ ExpirationTime int64 `json:"expiration_time,omitempty"` + // Unique id of the Recipient Token. + Id string `json:"id,omitempty"` + // Time at which this Recipient Token was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of Recipient Token updater. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateExchangeFilterRequest struct { + Filter ExchangeFilter `json:"filter"` + + Id string `json:"-" url:"-"` +} + +type UpdateExchangeFilterResponse struct { + Filter *ExchangeFilter `json:"filter,omitempty"` +} + +type UpdateExchangeRequest struct { + Exchange Exchange `json:"exchange"` + + Id string `json:"-" url:"-"` +} + +type UpdateExchangeResponse struct { + Exchange *Exchange `json:"exchange,omitempty"` +} + +type UpdateInstallationRequest struct { + Installation InstallationDetail `json:"installation"` + + InstallationId string `json:"-" url:"-"` + + ListingId string `json:"-" url:"-"` + + RotateToken bool `json:"rotate_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateInstallationRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateInstallationRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateInstallationResponse struct { + Installation *InstallationDetail `json:"installation,omitempty"` +} + +type UpdateListingRequest struct { + Id string `json:"-" url:"-"` + + Listing Listing `json:"listing"` +} + +type UpdateListingResponse struct { + Listing *Listing `json:"listing,omitempty"` +} + +type UpdatePersonalizationRequestRequest struct { + ListingId string `json:"-" url:"-"` + + Reason string `json:"reason,omitempty"` + + RequestId string `json:"-" url:"-"` + + Share *ShareInfo `json:"share,omitempty"` + + Status PersonalizationRequestStatus `json:"status"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdatePersonalizationRequestRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdatePersonalizationRequestRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdatePersonalizationRequestResponse struct { + Request *PersonalizationRequest `json:"request,omitempty"` +} + +type UpdateProviderAnalyticsDashboardRequest struct { + // id is immutable property and can't be updated. 
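A note on `ForceSendFields`, which appears on most structs above: because every optional field is tagged `omitempty`, zero values like `false` or `0` are normally dropped from the request body; listing the Go field name forces them through the generated MarshalJSON. A sketch with `UpdateInstallationRequest`, assuming the marshal package behaves as these generated methods imply:

// encodeNoRotate produces a body that carries "rotate_token": false
// explicitly. Without the ForceSendFields entry, omitempty would strip
// the zero-valued field from the payload entirely.
func encodeNoRotate() ([]byte, error) {
	req := UpdateInstallationRequest{
		Installation:    InstallationDetail{},
		RotateToken:     false,
		ForceSendFields: []string{"RotateToken"},
	}
	return req.MarshalJSON()
}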
+ Id string `json:"-" url:"-"` + // this is the version of the dashboard template we want to update our user + // to; the current expectation is that it should equal the latest version of + // the dashboard template + Version int64 `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateProviderAnalyticsDashboardRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateProviderAnalyticsDashboardRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateProviderAnalyticsDashboardResponse struct { + // this is the newly created Lakeview dashboard for the user + DashboardId string `json:"dashboard_id"` + // id & version should be the same as the request + Id string `json:"id"` + + Version int64 `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateProviderAnalyticsDashboardResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateProviderRequest struct { + Id string `json:"-" url:"-"` + + Provider ProviderInfo `json:"provider"` +} + +type UpdateProviderResponse struct { + Provider *ProviderInfo `json:"provider,omitempty"` +} + +type Visibility string + +const VisibilityPrivate Visibility = `PRIVATE` + +const VisibilityPublic Visibility = `PUBLIC` + +// String representation for [fmt.Print] +func (f *Visibility) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Visibility) Set(v string) error { + switch v { + case `PRIVATE`, `PUBLIC`: + *f = Visibility(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PRIVATE", "PUBLIC"`, v) + } +} + +// Type always returns Visibility to satisfy [pflag.Value] interface +func (f *Visibility) Type() string { + return "Visibility" +} diff --git a/ml/v2preview/api.go b/ml/v2preview/api.go new file mode 100755 index 000000000..c4f394496 --- /dev/null +++ b/ml/v2preview/api.go @@ -0,0 +1,678 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Experiments Preview, Model Registry Preview, etc. +package mlpreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type ExperimentsPreviewInterface interface { + + // Create experiment. + // + // Creates an experiment with a name. Returns the ID of the newly created + // experiment. Validates that another experiment with the same name does not + // already exist and fails if another experiment with the same name already + // exists. + // + // Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name exists. + CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) + + // Create a run. + // + // Creates a new run within an experiment. A run is usually a single execution + // of a machine learning or data ETL pipeline. MLflow uses runs to track the + // `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated with a single + // execution. + CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) + + // Delete an experiment. + // + // Marks an experiment and associated metadata, runs, metrics, params, and tags + // for deletion. If the experiment uses FileStore, artifacts associated with the + // experiment are also deleted. 
+ DeleteExperiment(ctx context.Context, request DeleteExperiment) error + + // Delete a run. + // + // Marks a run for deletion. + DeleteRun(ctx context.Context, request DeleteRun) error + + // Delete runs by creation time. + // + // Bulk delete runs in an experiment that were created prior to or at the + // specified timestamp. Deletes at most max_runs per request. To call this API + // from a Databricks Notebook in Python, you can use the client code snippet on + // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. + DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) + + // Delete a tag. + // + // Deletes a tag on a run. Tags are run metadata that can be updated during a + // run and after a run completes. + DeleteTag(ctx context.Context, request DeleteTag) error + + // Get metadata. + // + // Gets metadata for an experiment. + // + // This endpoint will return deleted experiments, but prefers the active + // experiment if an active and deleted experiment share the same name. If + // multiple deleted experiments share the same name, the API will return one of + // them. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name + // exists. + GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) + + // Get an experiment. + // + // Gets metadata for an experiment. This method works on deleted experiments. + GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) + + // Get history of a given metric within a run. + // + // Gets a list of all values for the specified metric for a given run. + // + // This method is generated by Databricks SDK Code Generator. + GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] + + // Get history of a given metric within a run. + // + // Gets a list of all values for the specified metric for a given run. + // + // This method is generated by Databricks SDK Code Generator. + GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) + + // Get experiment permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error) + + // Get experiment permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByExperimentId(ctx context.Context, experimentId string) (*GetExperimentPermissionLevelsResponse, error) + + // Get experiment permissions. + // + // Gets the permissions of an experiment. Experiments can inherit permissions + // from their root object. + GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) (*ExperimentPermissions, error) + + // Get experiment permissions. + // + // Gets the permissions of an experiment. Experiments can inherit permissions + // from their root object. + GetPermissionsByExperimentId(ctx context.Context, experimentId string) (*ExperimentPermissions, error) + + // Get a run. + // + // Gets the metadata, metrics, params, and tags for a run. In the case where + // multiple metrics with the same key are logged for a run, return only the + // value with the latest timestamp. + // + // If there are multiple values with the latest timestamp, return the maximum of + // these values. + GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) + + // Get all artifacts. 
+ // + // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is + // specified, the response contains only artifacts with the specified prefix. + // This API does not support pagination when listing artifacts in UC Volumes. A + // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). + // + // This method is generated by Databricks SDK Code Generator. + ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] + + // Get all artifacts. + // + // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is + // specified, the response contains only artifacts with the specified prefix. + // This API does not support pagination when listing artifacts in UC Volumes. A + // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC + // Volumes, which supports pagination. See [List directory contents | Files + // API](/api/workspace/files/listdirectorycontents). + // + // This method is generated by Databricks SDK Code Generator. + ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) + + // List experiments. + // + // Gets a list of all experiments. + // + // This method is generated by Databricks SDK Code Generator. + ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] + + // List experiments. + // + // Gets a list of all experiments. + // + // This method is generated by Databricks SDK Code Generator. + ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) + + // Log a batch. + // + // Logs a batch of metrics, params, and tags for a run. If any data failed to be + // persisted, the server will respond with an error (non-200 status code). + // + // In case of error (due to internal server error or an invalid request), + // partial data may be written. + // + // You can write metrics, params, and tags in interleaving fashion, but within a + // given entity type, writes are guaranteed to follow the order specified in the + // request body. + // + // The overwrite behavior for metrics, params, and tags is as follows: + // + // * Metrics: metric values are never overwritten. Logging a metric (key, value, + // timestamp) appends to the set of values for the metric with the provided key. + // + // * Tags: tag values can be overwritten by successive writes to the same tag + // key. That is, if multiple tag values with the same key are provided in the + // same API request, the last-provided tag value is written. Logging the same + // tag (key, value) is permitted. Specifically, logging a tag is idempotent. + // + // * Parameters: once written, param values cannot be changed (attempting to + // overwrite a param value will result in an error). However, logging the same + // param (key, value) is permitted. Specifically, logging a param is idempotent. 
+ // + // Request Limits: a single JSON-serialized API + // request may be up to 1 MB in size and contain: + // + // * No more than 1000 metrics, params, and tags in total * Up to 1000 metrics * + // Up to 100 params * Up to 100 tags + // + // For example, a valid request might contain 900 metrics, 50 params, and 50 + // tags, but logging 900 metrics, 50 params, and 51 tags is invalid. + // + // The following limits also apply to metric, param, and tag keys and values: + // + // * Metric keys, param keys, and tag keys can be up to 250 characters in length + // * Parameter and tag values can be up to 250 characters in length + LogBatch(ctx context.Context, request LogBatch) error + + // Log inputs to a run. + // + // **NOTE:** Experimental: This API may change or be removed in a future release + // without warning. + LogInputs(ctx context.Context, request LogInputs) error + + // Log a metric. + // + // Logs a metric for a run. A metric is a key-value pair (string key, float + // value) with an associated timestamp. Examples include the various metrics + // that represent ML model accuracy. A metric can be logged multiple times. + LogMetric(ctx context.Context, request LogMetric) error + + // Log a model. + // + // **NOTE:** Experimental: This API may change or be removed in a future release + // without warning. + LogModel(ctx context.Context, request LogModel) error + + // Log a param. + // + // Logs a param used for a run. A param is a key-value pair (string key, string + // value). Examples include hyperparameters used for ML model training and + // constant dates and values used in an ETL pipeline. A param can be logged only + // once for a run. + LogParam(ctx context.Context, request LogParam) error + + // Restores an experiment. + // + // Restore an experiment marked for deletion. This also restores associated + // metadata, runs, metrics, params, and tags. If the experiment uses FileStore, + // underlying artifacts associated with the experiment are also restored. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if the experiment was never created or was + // permanently deleted. + RestoreExperiment(ctx context.Context, request RestoreExperiment) error + + // Restore a run. + // + // Restores a deleted run. + RestoreRun(ctx context.Context, request RestoreRun) error + + // Restore runs by deletion time. + // + // Bulk restore runs in an experiment that were deleted no earlier than the + // specified timestamp. Restores at most max_runs per request. To call this API + // from a Databricks Notebook in Python, you can use the client code snippet on + // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. + RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) + + // Search experiments. + // + // Searches for experiments that satisfy specified search criteria. + // + // This method is generated by Databricks SDK Code Generator. + SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] + + // Search experiments. + // + // Searches for experiments that satisfy specified search criteria. + // + // This method is generated by Databricks SDK Code Generator. + SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) + + // Search for runs. + // + // Searches for runs that satisfy expressions. + // + // Search expressions can use `mlflowMetric` and `mlflowParam` keys. + // + // This method is generated by Databricks SDK Code Generator. 
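A sketch of a `LogBatch` call that stays inside the limits spelled out above; the `Metric`, `Param`, and `RunTag` field names are assumed from this package's generated model, and `time` is assumed imported:

// logSmokeTestBatch logs one metric, one param, and one tag in a single
// round trip: far below the 1000-metric / 100-param / 100-tag caps.
func logSmokeTestBatch(ctx context.Context, api ExperimentsPreviewInterface, runID string) error {
	return api.LogBatch(ctx, LogBatch{
		RunId: runID, // from a prior CreateRun call
		Metrics: []Metric{
			{Key: "accuracy", Value: 0.91, Timestamp: time.Now().UnixMilli(), Step: 1},
		},
		Params: []Param{{Key: "learning_rate", Value: "0.01"}},
		Tags:   []RunTag{{Key: "stage", Value: "smoke-test"}},
	})
}

Re-running the same call is safe for the param and tag (both idempotent per the rules above), while the metric value is appended to the key's history rather than overwritten.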
+ SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] + + // Search for runs. + // + // Searches for runs that satisfy expressions. + // + // Search expressions can use `mlflowMetric` and `mlflowParam` keys. + // + // This method is generated by Databricks SDK Code Generator. + SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) + + // Set a tag. + // + // Sets a tag on an experiment. Experiment tags are metadata that can be + // updated. + SetExperimentTag(ctx context.Context, request SetExperimentTag) error + + // Set experiment permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) + + // Set a tag. + // + // Sets a tag on a run. Tags are run metadata that can be updated during a run + // and after a run completes. + SetTag(ctx context.Context, request SetTag) error + + // Update an experiment. + // + // Updates experiment metadata. + UpdateExperiment(ctx context.Context, request UpdateExperiment) error + + // Update experiment permissions. + // + // Updates the permissions on an experiment. Experiments can inherit permissions + // from their root object. + UpdatePermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) + + // Update a run. + // + // Updates run metadata. + UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) +} + +func NewExperimentsPreview(client *client.DatabricksClient) *ExperimentsPreviewAPI { + return &ExperimentsPreviewAPI{ + experimentsPreviewImpl: experimentsPreviewImpl{ + client: client, + }, + } +} + +// Experiments are the primary unit of organization in MLflow; all MLflow runs +// belong to an experiment. Each experiment lets you visualize, search, and +// compare runs, as well as download run artifacts or metadata for analysis in +// other tools. Experiments are maintained in a Databricks hosted MLflow +// tracking server. +// +// Experiments are located in the workspace file tree. You manage experiments +// using the same tools you use to manage other workspace objects such as +// folders, notebooks, and libraries. +type ExperimentsPreviewAPI struct { + experimentsPreviewImpl +} + +// Get experiment permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *ExperimentsPreviewAPI) GetPermissionLevelsByExperimentId(ctx context.Context, experimentId string) (*GetExperimentPermissionLevelsResponse, error) { + return a.experimentsPreviewImpl.GetPermissionLevels(ctx, GetExperimentPermissionLevelsRequest{ + ExperimentId: experimentId, + }) +} + +// Get experiment permissions. +// +// Gets the permissions of an experiment. Experiments can inherit permissions +// from their root object. +func (a *ExperimentsPreviewAPI) GetPermissionsByExperimentId(ctx context.Context, experimentId string) (*ExperimentPermissions, error) { + return a.experimentsPreviewImpl.GetPermissions(ctx, GetExperimentPermissionsRequest{ + ExperimentId: experimentId, + }) +} + +type ModelRegistryPreviewInterface interface { + + // Approve transition request. + // + // Approves a model version stage transition request. + ApproveTransitionRequest(ctx context.Context, request ApproveTransitionRequest) (*ApproveTransitionRequestResponse, error) + + // Post a comment. 
+ // + // Posts a comment on a model version. A comment can be submitted either by a + // user or programmatically to display relevant information about the model. For + // example, test results or deployment errors. + CreateComment(ctx context.Context, request CreateComment) (*CreateCommentResponse, error) + + // Create a model. + // + // Creates a new registered model with the name specified in the request body. + // + // Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name + // exists. + CreateModel(ctx context.Context, request CreateModelRequest) (*CreateModelResponse, error) + + // Create a model version. + // + // Creates a model version. + CreateModelVersion(ctx context.Context, request CreateModelVersionRequest) (*CreateModelVersionResponse, error) + + // Make a transition request. + // + // Creates a model version stage transition request. + CreateTransitionRequest(ctx context.Context, request CreateTransitionRequest) (*CreateTransitionRequestResponse, error) + + // Create a webhook. + // + // **NOTE**: This endpoint is in Public Preview. + // + // Creates a registry webhook. + CreateWebhook(ctx context.Context, request CreateRegistryWebhook) (*CreateWebhookResponse, error) + + // Delete a comment. + // + // Deletes a comment on a model version. + DeleteComment(ctx context.Context, request DeleteCommentRequest) error + + // Delete a model. + // + // Deletes a registered model. + DeleteModel(ctx context.Context, request DeleteModelRequest) error + + // Delete a model tag. + // + // Deletes the tag for a registered model. + DeleteModelTag(ctx context.Context, request DeleteModelTagRequest) error + + // Delete a model version. + // + // Deletes a model version. + DeleteModelVersion(ctx context.Context, request DeleteModelVersionRequest) error + + // Delete a model version tag. + // + // Deletes a model version tag. + DeleteModelVersionTag(ctx context.Context, request DeleteModelVersionTagRequest) error + + // Delete a transition request. + // + // Cancels a model version stage transition request. + DeleteTransitionRequest(ctx context.Context, request DeleteTransitionRequestRequest) error + + // Delete a webhook. + // + // **NOTE:** This endpoint is in Public Preview. + // + // Deletes a registry webhook. + DeleteWebhook(ctx context.Context, request DeleteWebhookRequest) error + + // Get the latest version. + // + // Gets the latest version of a registered model. + // + // This method is generated by Databricks SDK Code Generator. + GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] + + // Get the latest version. + // + // Gets the latest version of a registered model. + // + // This method is generated by Databricks SDK Code Generator. + GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) + + // Get model. + // + // Get the details of a model. This is a Databricks workspace version of the + // [MLflow endpoint] that also returns the model's Databricks workspace ID and + // the permission level of the requesting user on the model. + // + // [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel + GetModel(ctx context.Context, request GetModelRequest) (*GetModelResponse, error) + + // Get a model version. + // + // Get a model version. + GetModelVersion(ctx context.Context, request GetModelVersionRequest) (*GetModelVersionResponse, error) + + // Get a model version URI. + // + // Gets a URI to download the model version. 
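The `...ByExperimentId` helpers defined on `ExperimentsPreviewAPI` earlier are thin sugar over the request-struct form; both spellings reach the same endpoint. A quick sketch (the experiment ID is illustrative):

// Both calls are equivalent; the wrapper merely fills in the
// one-field request struct.
func permissionLevelsBothWays(ctx context.Context, api *ExperimentsPreviewAPI) error {
	short, err := api.GetPermissionLevelsByExperimentId(ctx, "12345")
	if err != nil {
		return err
	}
	long, err := api.GetPermissionLevels(ctx, GetExperimentPermissionLevelsRequest{
		ExperimentId: "12345",
	})
	if err != nil {
		return err
	}
	_, _ = short, long
	return nil
}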
+ GetModelVersionDownloadUri(ctx context.Context, request GetModelVersionDownloadUriRequest) (*GetModelVersionDownloadUriResponse, error) + + // Get registered model permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetRegisteredModelPermissionLevelsRequest) (*GetRegisteredModelPermissionLevelsResponse, error) + + // Get registered model permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByRegisteredModelId(ctx context.Context, registeredModelId string) (*GetRegisteredModelPermissionLevelsResponse, error) + + // Get registered model permissions. + // + // Gets the permissions of a registered model. Registered models can inherit + // permissions from their root object. + GetPermissions(ctx context.Context, request GetRegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) + + // Get registered model permissions. + // + // Gets the permissions of a registered model. Registered models can inherit + // permissions from their root object. + GetPermissionsByRegisteredModelId(ctx context.Context, registeredModelId string) (*RegisteredModelPermissions, error) + + // List models. + // + // Lists all available registered models, up to the limit specified in + // __max_results__. + // + // This method is generated by Databricks SDK Code Generator. + ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] + + // List models. + // + // Lists all available registered models, up to the limit specified in + // __max_results__. + // + // This method is generated by Databricks SDK Code Generator. + ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) + + // List transition requests. + // + // Gets a list of all open stage transition requests for the model version. + // + // This method is generated by Databricks SDK Code Generator. + ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] + + // List transition requests. + // + // Gets a list of all open stage transition requests for the model version. + // + // This method is generated by Databricks SDK Code Generator. + ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) + + // List registry webhooks. + // + // **NOTE:** This endpoint is in Public Preview. + // + // Lists all registry webhooks. + // + // This method is generated by Databricks SDK Code Generator. + ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] + + // List registry webhooks. + // + // **NOTE:** This endpoint is in Public Preview. + // + // Lists all registry webhooks. + // + // This method is generated by Databricks SDK Code Generator. + ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) + + // Reject a transition request. + // + // Rejects a model version stage transition request. + RejectTransitionRequest(ctx context.Context, request RejectTransitionRequest) (*RejectTransitionRequestResponse, error) + + // Rename a model. + // + // Renames a registered model. + RenameModel(ctx context.Context, request RenameModelRequest) (*RenameModelResponse, error) + + // Searches model versions. + // + // Searches for specific model versions based on the supplied __filter__. + // + // This method is generated by Databricks SDK Code Generator. 
+ SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] + + // Searches model versions. + // + // Searches for specific model versions based on the supplied __filter__. + // + // This method is generated by Databricks SDK Code Generator. + SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) + + // Search models. + // + // Search for registered models based on the specified __filter__. + // + // This method is generated by Databricks SDK Code Generator. + SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] + + // Search models. + // + // Search for registered models based on the specified __filter__. + // + // This method is generated by Databricks SDK Code Generator. + SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) + + // Set a tag. + // + // Sets a tag on a registered model. + SetModelTag(ctx context.Context, request SetModelTagRequest) error + + // Set a version tag. + // + // Sets a model version tag. + SetModelVersionTag(ctx context.Context, request SetModelVersionTagRequest) error + + // Set registered model permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) + + // Test a webhook. + // + // **NOTE:** This endpoint is in Public Preview. + // + // Tests a registry webhook. + TestRegistryWebhook(ctx context.Context, request TestRegistryWebhookRequest) (*TestRegistryWebhookResponse, error) + + // Transition a stage. + // + // Transition a model version's stage. This is a Databricks workspace version of + // the [MLflow endpoint] that also accepts a comment associated with the + // transition to be recorded. + // + // [MLflow endpoint]: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage + TransitionStage(ctx context.Context, request TransitionModelVersionStageDatabricks) (*TransitionStageResponse, error) + + // Update a comment. + // + // Post an edit to a comment on a model version. + UpdateComment(ctx context.Context, request UpdateComment) (*UpdateCommentResponse, error) + + // Update model. + // + // Updates a registered model. + UpdateModel(ctx context.Context, request UpdateModelRequest) error + + // Update model version. + // + // Updates the model version. + UpdateModelVersion(ctx context.Context, request UpdateModelVersionRequest) error + + // Update registered model permissions. + // + // Updates the permissions on a registered model. Registered models can inherit + // permissions from their root object. + UpdatePermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) + + // Update a webhook. + // + // **NOTE:** This endpoint is in Public Preview. + // + // Updates a registry webhook. + UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error +} + +func NewModelRegistryPreview(client *client.DatabricksClient) *ModelRegistryPreviewAPI { + return &ModelRegistryPreviewAPI{ + modelRegistryPreviewImpl: modelRegistryPreviewImpl{ + client: client, + }, + } +} + +// Note: This API reference documents APIs for the Workspace Model Registry. 
+// Databricks recommends using [Models in Unity +// Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog +// provides centralized model governance, cross-workspace access, lineage, and +// deployment. Workspace Model Registry will be deprecated in the future. +// +// The Workspace Model Registry is a centralized model repository and a UI and +// set of APIs that enable you to manage the full lifecycle of MLflow Models. +type ModelRegistryPreviewAPI struct { + modelRegistryPreviewImpl +} + +// Get registered model permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *ModelRegistryPreviewAPI) GetPermissionLevelsByRegisteredModelId(ctx context.Context, registeredModelId string) (*GetRegisteredModelPermissionLevelsResponse, error) { + return a.modelRegistryPreviewImpl.GetPermissionLevels(ctx, GetRegisteredModelPermissionLevelsRequest{ + RegisteredModelId: registeredModelId, + }) +} + +// Get registered model permissions. +// +// Gets the permissions of a registered model. Registered models can inherit +// permissions from their root object. +func (a *ModelRegistryPreviewAPI) GetPermissionsByRegisteredModelId(ctx context.Context, registeredModelId string) (*RegisteredModelPermissions, error) { + return a.modelRegistryPreviewImpl.GetPermissions(ctx, GetRegisteredModelPermissionsRequest{ + RegisteredModelId: registeredModelId, + }) +} diff --git a/ml/v2preview/client.go b/ml/v2preview/client.go new file mode 100755 index 000000000..d3b7aadaa --- /dev/null +++ b/ml/v2preview/client.go @@ -0,0 +1,79 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package mlpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type ExperimentsPreviewClient struct { + ExperimentsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewExperimentsPreviewClient(cfg *config.Config) (*ExperimentsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ExperimentsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ExperimentsPreviewInterface: NewExperimentsPreview(databricksClient), + }, nil +} + +type ModelRegistryPreviewClient struct { + ModelRegistryPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewModelRegistryPreviewClient(cfg *config.Config) (*ModelRegistryPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ModelRegistryPreviewClient{ + Config: cfg, + apiClient: apiClient, + 
ModelRegistryPreviewInterface: NewModelRegistryPreview(databricksClient), + }, nil +} diff --git a/ml/v2preview/impl.go b/ml/v2preview/impl.go new file mode 100755 index 000000000..e750866bf --- /dev/null +++ b/ml/v2preview/impl.go @@ -0,0 +1,1113 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package mlpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just ExperimentsPreview API methods +type experimentsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *experimentsPreviewImpl) CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) { + var createExperimentResponse CreateExperimentResponse + path := "/api/2.0preview/mlflow/experiments/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExperimentResponse) + return &createExperimentResponse, err +} + +func (a *experimentsPreviewImpl) CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) { + var createRunResponse CreateRunResponse + path := "/api/2.0preview/mlflow/runs/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createRunResponse) + return &createRunResponse, err +} + +func (a *experimentsPreviewImpl) DeleteExperiment(ctx context.Context, request DeleteExperiment) error { + var deleteExperimentResponse DeleteExperimentResponse + path := "/api/2.0preview/mlflow/experiments/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteExperimentResponse) + return err +} + +func (a *experimentsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRun) error { + var deleteRunResponse DeleteRunResponse + path := "/api/2.0preview/mlflow/runs/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunResponse) + return err +} + +func (a *experimentsPreviewImpl) DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) { + var deleteRunsResponse DeleteRunsResponse + path := "/api/2.0preview/mlflow/databricks/runs/delete-runs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunsResponse) + return &deleteRunsResponse, err +} + +func (a *experimentsPreviewImpl) DeleteTag(ctx context.Context, request DeleteTag) error { + var deleteTagResponse DeleteTagResponse + path := "/api/2.0preview/mlflow/runs/delete-tag" + queryParams := make(map[string]any) + headers := 
make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteTagResponse) + return err +} + +func (a *experimentsPreviewImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) { + var getExperimentResponse GetExperimentResponse + path := "/api/2.0preview/mlflow/experiments/get-by-name" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentResponse) + return &getExperimentResponse, err +} + +func (a *experimentsPreviewImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { + var getExperimentResponse GetExperimentResponse + path := "/api/2.0preview/mlflow/experiments/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentResponse) + return &getExperimentResponse, err +} + +// Get history of a given metric within a run. +// +// Gets a list of all values for the specified metric for a given run. +func (a *experimentsPreviewImpl) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { + + getNextPage := func(ctx context.Context, req GetHistoryRequest) (*GetMetricHistoryResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGetHistory(ctx, req) + } + getItems := func(resp *GetMetricHistoryResponse) []Metric { + return resp.Metrics + } + getNextReq := func(resp *GetMetricHistoryResponse) *GetHistoryRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get history of a given metric within a run. +// +// Gets a list of all values for the specified metric for a given run. 
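+//
+// A minimal usage sketch, not part of the generated surface: RunId and
+// MetricKey are assumed fields of GetHistoryRequest, and the run ID and
+// metric key below are illustrative placeholders.
+//
+//	ctx := context.Background()
+//	exp, err := NewExperimentsPreviewClient(nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	// Collect every recorded value of the "loss" metric for one run.
+//	metrics, err := exp.GetHistoryAll(ctx, GetHistoryRequest{
+//		RunId:     "<run-id>",
+//		MetricKey: "loss",
+//	})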
+func (a *experimentsPreviewImpl) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { + iterator := a.GetHistory(ctx, request) + return listing.ToSliceN[Metric, int](ctx, iterator, request.MaxResults) + +} +func (a *experimentsPreviewImpl) internalGetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { + var getMetricHistoryResponse GetMetricHistoryResponse + path := "/api/2.0preview/mlflow/metrics/get-history" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getMetricHistoryResponse) + return &getMetricHistoryResponse, err +} + +func (a *experimentsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error) { + var getExperimentPermissionLevelsResponse GetExperimentPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v/permissionLevels", request.ExperimentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentPermissionLevelsResponse) + return &getExperimentPermissionLevelsResponse, err +} + +func (a *experimentsPreviewImpl) GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) (*ExperimentPermissions, error) { + var experimentPermissions ExperimentPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &experimentPermissions) + return &experimentPermissions, err +} + +func (a *experimentsPreviewImpl) GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) { + var getRunResponse GetRunResponse + path := "/api/2.0preview/mlflow/runs/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRunResponse) + return &getRunResponse, err +} + +// Get all artifacts. +// +// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). 
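+//
+// A minimal sketch of consuming the iterator, assuming RunId is a field of
+// ListArtifactsRequest and that the listing iterator exposes HasNext/Next
+// methods; the run ID is an illustrative placeholder.
+//
+//	it := exp.ListArtifacts(ctx, ListArtifactsRequest{RunId: "<run-id>"})
+//	for it.HasNext(ctx) {
+//		f, err := it.Next(ctx)
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(f.Path) // print each artifact's path
+//	}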
+func (a *experimentsPreviewImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListArtifactsRequest) (*ListArtifactsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListArtifacts(ctx, req) + } + getItems := func(resp *ListArtifactsResponse) []FileInfo { + return resp.Files + } + getNextReq := func(resp *ListArtifactsResponse) *ListArtifactsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all artifacts. +// +// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). +func (a *experimentsPreviewImpl) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { + iterator := a.ListArtifacts(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} +func (a *experimentsPreviewImpl) internalListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { + var listArtifactsResponse ListArtifactsResponse + path := "/api/2.0preview/mlflow/artifacts/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listArtifactsResponse) + return &listArtifactsResponse, err +} + +// List experiments. +// +// Gets a list of all experiments. +func (a *experimentsPreviewImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] { + + getNextPage := func(ctx context.Context, req ListExperimentsRequest) (*ListExperimentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListExperiments(ctx, req) + } + getItems := func(resp *ListExperimentsResponse) []Experiment { + return resp.Experiments + } + getNextReq := func(resp *ListExperimentsResponse) *ListExperimentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List experiments. +// +// Gets a list of all experiments. 
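+//
+// A minimal sketch: MaxResults is taken from the request to cap how many
+// items ToSliceN collects (see the implementation below).
+//
+//	experiments, err := exp.ListExperimentsAll(ctx, ListExperimentsRequest{
+//		MaxResults: 100,
+//	})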
+func (a *experimentsPreviewImpl) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { + iterator := a.ListExperiments(ctx, request) + return listing.ToSliceN[Experiment, int](ctx, iterator, request.MaxResults) + +} +func (a *experimentsPreviewImpl) internalListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { + var listExperimentsResponse ListExperimentsResponse + path := "/api/2.0preview/mlflow/experiments/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExperimentsResponse) + return &listExperimentsResponse, err +} + +func (a *experimentsPreviewImpl) LogBatch(ctx context.Context, request LogBatch) error { + var logBatchResponse LogBatchResponse + path := "/api/2.0preview/mlflow/runs/log-batch" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logBatchResponse) + return err +} + +func (a *experimentsPreviewImpl) LogInputs(ctx context.Context, request LogInputs) error { + var logInputsResponse LogInputsResponse + path := "/api/2.0preview/mlflow/runs/log-inputs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logInputsResponse) + return err +} + +func (a *experimentsPreviewImpl) LogMetric(ctx context.Context, request LogMetric) error { + var logMetricResponse LogMetricResponse + path := "/api/2.0preview/mlflow/runs/log-metric" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logMetricResponse) + return err +} + +func (a *experimentsPreviewImpl) LogModel(ctx context.Context, request LogModel) error { + var logModelResponse LogModelResponse + path := "/api/2.0preview/mlflow/runs/log-model" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logModelResponse) + return err +} + +func (a *experimentsPreviewImpl) LogParam(ctx context.Context, request LogParam) error { + var logParamResponse LogParamResponse + path := "/api/2.0preview/mlflow/runs/log-parameter" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logParamResponse) + return err +} + +func (a *experimentsPreviewImpl) RestoreExperiment(ctx context.Context, request RestoreExperiment) error { + var restoreExperimentResponse RestoreExperimentResponse + path := "/api/2.0preview/mlflow/experiments/restore" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, 
request, &restoreExperimentResponse) + return err +} + +func (a *experimentsPreviewImpl) RestoreRun(ctx context.Context, request RestoreRun) error { + var restoreRunResponse RestoreRunResponse + path := "/api/2.0preview/mlflow/runs/restore" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restoreRunResponse) + return err +} + +func (a *experimentsPreviewImpl) RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) { + var restoreRunsResponse RestoreRunsResponse + path := "/api/2.0preview/mlflow/databricks/runs/restore-runs" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restoreRunsResponse) + return &restoreRunsResponse, err +} + +// Search experiments. +// +// Searches for experiments that satisfy specified search criteria. +func (a *experimentsPreviewImpl) SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] { + + getNextPage := func(ctx context.Context, req SearchExperiments) (*SearchExperimentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchExperiments(ctx, req) + } + getItems := func(resp *SearchExperimentsResponse) []Experiment { + return resp.Experiments + } + getNextReq := func(resp *SearchExperimentsResponse) *SearchExperiments { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search experiments. +// +// Searches for experiments that satisfy specified search criteria. +func (a *experimentsPreviewImpl) SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) { + iterator := a.SearchExperiments(ctx, request) + return listing.ToSlice[Experiment](ctx, iterator) +} +func (a *experimentsPreviewImpl) internalSearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { + var searchExperimentsResponse SearchExperimentsResponse + path := "/api/2.0preview/mlflow/experiments/search" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &searchExperimentsResponse) + return &searchExperimentsResponse, err +} + +// Search for runs. +// +// Searches for runs that satisfy expressions. 
+// +// Search expressions can use `mlflowMetric` and `mlflowParam` keys. +func (a *experimentsPreviewImpl) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { + + getNextPage := func(ctx context.Context, req SearchRuns) (*SearchRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchRuns(ctx, req) + } + getItems := func(resp *SearchRunsResponse) []Run { + return resp.Runs + } + getNextReq := func(resp *SearchRunsResponse) *SearchRuns { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search for runs. +// +// Searches for runs that satisfy expressions. +// +// Search expressions can use `mlflowMetric` and `mlflowParam` keys. +func (a *experimentsPreviewImpl) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { + iterator := a.SearchRuns(ctx, request) + return listing.ToSlice[Run](ctx, iterator) +} +func (a *experimentsPreviewImpl) internalSearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { + var searchRunsResponse SearchRunsResponse + path := "/api/2.0preview/mlflow/runs/search" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &searchRunsResponse) + return &searchRunsResponse, err +} + +func (a *experimentsPreviewImpl) SetExperimentTag(ctx context.Context, request SetExperimentTag) error { + var setExperimentTagResponse SetExperimentTagResponse + path := "/api/2.0preview/mlflow/experiments/set-experiment-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setExperimentTagResponse) + return err +} + +func (a *experimentsPreviewImpl) SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { + var experimentPermissions ExperimentPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &experimentPermissions) + return &experimentPermissions, err +} + +func (a *experimentsPreviewImpl) SetTag(ctx context.Context, request SetTag) error { + var setTagResponse SetTagResponse + path := "/api/2.0preview/mlflow/runs/set-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setTagResponse) + return err +} + +func (a *experimentsPreviewImpl) UpdateExperiment(ctx context.Context, request UpdateExperiment) error { + var updateExperimentResponse UpdateExperimentResponse + path := "/api/2.0preview/mlflow/experiments/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := 
a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateExperimentResponse) + return err +} + +func (a *experimentsPreviewImpl) UpdatePermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { + var experimentPermissions ExperimentPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &experimentPermissions) + return &experimentPermissions, err +} + +func (a *experimentsPreviewImpl) UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) { + var updateRunResponse UpdateRunResponse + path := "/api/2.0preview/mlflow/runs/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateRunResponse) + return &updateRunResponse, err +} + +// unexported type that holds implementations of just ModelRegistryPreview API methods +type modelRegistryPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *modelRegistryPreviewImpl) ApproveTransitionRequest(ctx context.Context, request ApproveTransitionRequest) (*ApproveTransitionRequestResponse, error) { + var approveTransitionRequestResponse ApproveTransitionRequestResponse + path := "/api/2.0preview/mlflow/transition-requests/approve" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &approveTransitionRequestResponse) + return &approveTransitionRequestResponse, err +} + +func (a *modelRegistryPreviewImpl) CreateComment(ctx context.Context, request CreateComment) (*CreateCommentResponse, error) { + var createCommentResponse CreateCommentResponse + path := "/api/2.0preview/mlflow/comments/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCommentResponse) + return &createCommentResponse, err +} + +func (a *modelRegistryPreviewImpl) CreateModel(ctx context.Context, request CreateModelRequest) (*CreateModelResponse, error) { + var createModelResponse CreateModelResponse + path := "/api/2.0preview/mlflow/registered-models/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createModelResponse) + return &createModelResponse, err +} + +func (a *modelRegistryPreviewImpl) CreateModelVersion(ctx context.Context, request CreateModelVersionRequest) (*CreateModelVersionResponse, error) { + var createModelVersionResponse CreateModelVersionResponse + path := "/api/2.0preview/mlflow/model-versions/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, 
http.MethodPost, path, headers, queryParams, request, &createModelVersionResponse) + return &createModelVersionResponse, err +} + +func (a *modelRegistryPreviewImpl) CreateTransitionRequest(ctx context.Context, request CreateTransitionRequest) (*CreateTransitionRequestResponse, error) { + var createTransitionRequestResponse CreateTransitionRequestResponse + path := "/api/2.0preview/mlflow/transition-requests/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createTransitionRequestResponse) + return &createTransitionRequestResponse, err +} + +func (a *modelRegistryPreviewImpl) CreateWebhook(ctx context.Context, request CreateRegistryWebhook) (*CreateWebhookResponse, error) { + var createWebhookResponse CreateWebhookResponse + path := "/api/2.0preview/mlflow/registry-webhooks/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createWebhookResponse) + return &createWebhookResponse, err +} + +func (a *modelRegistryPreviewImpl) DeleteComment(ctx context.Context, request DeleteCommentRequest) error { + var deleteCommentResponse DeleteCommentResponse + path := "/api/2.0preview/mlflow/comments/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCommentResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteModel(ctx context.Context, request DeleteModelRequest) error { + var deleteModelResponse DeleteModelResponse + path := "/api/2.0preview/mlflow/registered-models/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteModelTag(ctx context.Context, request DeleteModelTagRequest) error { + var deleteModelTagResponse DeleteModelTagResponse + path := "/api/2.0preview/mlflow/registered-models/delete-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelTagResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteModelVersion(ctx context.Context, request DeleteModelVersionRequest) error { + var deleteModelVersionResponse DeleteModelVersionResponse + path := "/api/2.0preview/mlflow/model-versions/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelVersionResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteModelVersionTag(ctx context.Context, request DeleteModelVersionTagRequest) error { + var deleteModelVersionTagResponse DeleteModelVersionTagResponse + path := "/api/2.0preview/mlflow/model-versions/delete-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, 
http.MethodDelete, path, headers, queryParams, request, &deleteModelVersionTagResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteTransitionRequest(ctx context.Context, request DeleteTransitionRequestRequest) error { + var deleteTransitionRequestResponse DeleteTransitionRequestResponse + path := "/api/2.0preview/mlflow/transition-requests/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteTransitionRequestResponse) + return err +} + +func (a *modelRegistryPreviewImpl) DeleteWebhook(ctx context.Context, request DeleteWebhookRequest) error { + var deleteWebhookResponse DeleteWebhookResponse + path := "/api/2.0preview/mlflow/registry-webhooks/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteWebhookResponse) + return err +} + +// Get the latest version. +// +// Gets the latest version of a registered model. +func (a *modelRegistryPreviewImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] { + + getNextPage := func(ctx context.Context, req GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGetLatestVersions(ctx, req) + } + getItems := func(resp *GetLatestVersionsResponse) []ModelVersion { + return resp.ModelVersions + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get the latest version. +// +// Gets the latest version of a registered model. 
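+//
+// A minimal sketch: Name is an assumed field of GetLatestVersionsRequest,
+// and the model name is an illustrative placeholder.
+//
+//	ctx := context.Background()
+//	mr, err := NewModelRegistryPreviewClient(nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	versions, err := mr.GetLatestVersionsAll(ctx, GetLatestVersionsRequest{
+//		Name: "my-model",
+//	})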
+func (a *modelRegistryPreviewImpl) GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) { + iterator := a.GetLatestVersions(ctx, request) + return listing.ToSlice[ModelVersion](ctx, iterator) +} +func (a *modelRegistryPreviewImpl) internalGetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { + var getLatestVersionsResponse GetLatestVersionsResponse + path := "/api/2.0preview/mlflow/registered-models/get-latest-versions" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &getLatestVersionsResponse) + return &getLatestVersionsResponse, err +} + +func (a *modelRegistryPreviewImpl) GetModel(ctx context.Context, request GetModelRequest) (*GetModelResponse, error) { + var getModelResponse GetModelResponse + path := "/api/2.0preview/mlflow/databricks/registered-models/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelResponse) + return &getModelResponse, err +} + +func (a *modelRegistryPreviewImpl) GetModelVersion(ctx context.Context, request GetModelVersionRequest) (*GetModelVersionResponse, error) { + var getModelVersionResponse GetModelVersionResponse + path := "/api/2.0preview/mlflow/model-versions/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelVersionResponse) + return &getModelVersionResponse, err +} + +func (a *modelRegistryPreviewImpl) GetModelVersionDownloadUri(ctx context.Context, request GetModelVersionDownloadUriRequest) (*GetModelVersionDownloadUriResponse, error) { + var getModelVersionDownloadUriResponse GetModelVersionDownloadUriResponse + path := "/api/2.0preview/mlflow/model-versions/get-download-uri" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelVersionDownloadUriResponse) + return &getModelVersionDownloadUriResponse, err +} + +func (a *modelRegistryPreviewImpl) GetPermissionLevels(ctx context.Context, request GetRegisteredModelPermissionLevelsRequest) (*GetRegisteredModelPermissionLevelsResponse, error) { + var getRegisteredModelPermissionLevelsResponse GetRegisteredModelPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v/permissionLevels", request.RegisteredModelId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRegisteredModelPermissionLevelsResponse) + return &getRegisteredModelPermissionLevelsResponse, err +} + +func (a *modelRegistryPreviewImpl) GetPermissions(ctx context.Context, request GetRegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { + var registeredModelPermissions RegisteredModelPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = 
"application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, ®isteredModelPermissions) + return ®isteredModelPermissions, err +} + +// List models. +// +// Lists all available registered models, up to the limit specified in +// __max_results__. +func (a *modelRegistryPreviewImpl) ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] { + + getNextPage := func(ctx context.Context, req ListModelsRequest) (*ListModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListModels(ctx, req) + } + getItems := func(resp *ListModelsResponse) []Model { + return resp.RegisteredModels + } + getNextReq := func(resp *ListModelsResponse) *ListModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List models. +// +// Lists all available registered models, up to the limit specified in +// __max_results__. +func (a *modelRegistryPreviewImpl) ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) { + iterator := a.ListModels(ctx, request) + return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) + +} +func (a *modelRegistryPreviewImpl) internalListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { + var listModelsResponse ListModelsResponse + path := "/api/2.0preview/mlflow/registered-models/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listModelsResponse) + return &listModelsResponse, err +} + +// List transition requests. +// +// Gets a list of all open stage transition requests for the model version. +func (a *modelRegistryPreviewImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] { + + getNextPage := func(ctx context.Context, req ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListTransitionRequests(ctx, req) + } + getItems := func(resp *ListTransitionRequestsResponse) []Activity { + return resp.Requests + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List transition requests. +// +// Gets a list of all open stage transition requests for the model version. +func (a *modelRegistryPreviewImpl) ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) { + iterator := a.ListTransitionRequests(ctx, request) + return listing.ToSlice[Activity](ctx, iterator) +} +func (a *modelRegistryPreviewImpl) internalListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { + var listTransitionRequestsResponse ListTransitionRequestsResponse + path := "/api/2.0preview/mlflow/transition-requests/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTransitionRequestsResponse) + return &listTransitionRequestsResponse, err +} + +// List registry webhooks. +// +// **NOTE:** This endpoint is in Public Preview. 
+// +// Lists all registry webhooks. +func (a *modelRegistryPreviewImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] { + + getNextPage := func(ctx context.Context, req ListWebhooksRequest) (*ListRegistryWebhooks, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListWebhooks(ctx, req) + } + getItems := func(resp *ListRegistryWebhooks) []RegistryWebhook { + return resp.Webhooks + } + getNextReq := func(resp *ListRegistryWebhooks) *ListWebhooksRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List registry webhooks. +// +// **NOTE:** This endpoint is in Public Preview. +// +// Lists all registry webhooks. +func (a *modelRegistryPreviewImpl) ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) { + iterator := a.ListWebhooks(ctx, request) + return listing.ToSlice[RegistryWebhook](ctx, iterator) +} +func (a *modelRegistryPreviewImpl) internalListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { + var listRegistryWebhooks ListRegistryWebhooks + path := "/api/2.0preview/mlflow/registry-webhooks/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRegistryWebhooks) + return &listRegistryWebhooks, err +} + +func (a *modelRegistryPreviewImpl) RejectTransitionRequest(ctx context.Context, request RejectTransitionRequest) (*RejectTransitionRequestResponse, error) { + var rejectTransitionRequestResponse RejectTransitionRequestResponse + path := "/api/2.0preview/mlflow/transition-requests/reject" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &rejectTransitionRequestResponse) + return &rejectTransitionRequestResponse, err +} + +func (a *modelRegistryPreviewImpl) RenameModel(ctx context.Context, request RenameModelRequest) (*RenameModelResponse, error) { + var renameModelResponse RenameModelResponse + path := "/api/2.0preview/mlflow/registered-models/rename" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &renameModelResponse) + return &renameModelResponse, err +} + +// Searches model versions. +// +// Searches for specific model versions based on the supplied __filter__. 
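+//
+// The companion SearchModelVersionsAll (below) collects every page into a
+// slice. A minimal sketch, given a ModelRegistryPreviewClient mr and a
+// context ctx as above; Filter is an assumed request field and the filter
+// expression is an illustrative placeholder.
+//
+//	versions, err := mr.SearchModelVersionsAll(ctx, SearchModelVersionsRequest{
+//		Filter: "name='my-model'",
+//	})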
+func (a *modelRegistryPreviewImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] { + + getNextPage := func(ctx context.Context, req SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchModelVersions(ctx, req) + } + getItems := func(resp *SearchModelVersionsResponse) []ModelVersion { + return resp.ModelVersions + } + getNextReq := func(resp *SearchModelVersionsResponse) *SearchModelVersionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Searches model versions. +// +// Searches for specific model versions based on the supplied __filter__. +func (a *modelRegistryPreviewImpl) SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) { + iterator := a.SearchModelVersions(ctx, request) + return listing.ToSliceN[ModelVersion, int](ctx, iterator, request.MaxResults) + +} +func (a *modelRegistryPreviewImpl) internalSearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { + var searchModelVersionsResponse SearchModelVersionsResponse + path := "/api/2.0preview/mlflow/model-versions/search" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchModelVersionsResponse) + return &searchModelVersionsResponse, err +} + +// Search models. +// +// Search for registered models based on the specified __filter__. +func (a *modelRegistryPreviewImpl) SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] { + + getNextPage := func(ctx context.Context, req SearchModelsRequest) (*SearchModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchModels(ctx, req) + } + getItems := func(resp *SearchModelsResponse) []Model { + return resp.RegisteredModels + } + getNextReq := func(resp *SearchModelsResponse) *SearchModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search models. +// +// Search for registered models based on the specified __filter__. 
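+//
+// A minimal sketch, reusing mr and ctx from above: Filter is an assumed
+// request field, and MaxResults caps how many models ToSliceN collects (see
+// the implementation below); the filter expression is illustrative.
+//
+//	models, err := mr.SearchModelsAll(ctx, SearchModelsRequest{
+//		Filter:     "name LIKE 'my-%'",
+//		MaxResults: 100,
+//	})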
+func (a *modelRegistryPreviewImpl) SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) { + iterator := a.SearchModels(ctx, request) + return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) + +} +func (a *modelRegistryPreviewImpl) internalSearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { + var searchModelsResponse SearchModelsResponse + path := "/api/2.0preview/mlflow/registered-models/search" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchModelsResponse) + return &searchModelsResponse, err +} + +func (a *modelRegistryPreviewImpl) SetModelTag(ctx context.Context, request SetModelTagRequest) error { + var setModelTagResponse SetModelTagResponse + path := "/api/2.0preview/mlflow/registered-models/set-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setModelTagResponse) + return err +} + +func (a *modelRegistryPreviewImpl) SetModelVersionTag(ctx context.Context, request SetModelVersionTagRequest) error { + var setModelVersionTagResponse SetModelVersionTagResponse + path := "/api/2.0preview/mlflow/model-versions/set-tag" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setModelVersionTagResponse) + return err +} + +func (a *modelRegistryPreviewImpl) SetPermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { + var registeredModelPermissions RegisteredModelPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &registeredModelPermissions) + return &registeredModelPermissions, err +} + +func (a *modelRegistryPreviewImpl) TestRegistryWebhook(ctx context.Context, request TestRegistryWebhookRequest) (*TestRegistryWebhookResponse, error) { + var testRegistryWebhookResponse TestRegistryWebhookResponse + path := "/api/2.0preview/mlflow/registry-webhooks/test" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &testRegistryWebhookResponse) + return &testRegistryWebhookResponse, err +} + +func (a *modelRegistryPreviewImpl) TransitionStage(ctx context.Context, request TransitionModelVersionStageDatabricks) (*TransitionStageResponse, error) { + var transitionStageResponse TransitionStageResponse + path := "/api/2.0preview/mlflow/databricks/model-versions/transition-stage" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &transitionStageResponse) + 
return &transitionStageResponse, err +} + +func (a *modelRegistryPreviewImpl) UpdateComment(ctx context.Context, request UpdateComment) (*UpdateCommentResponse, error) { + var updateCommentResponse UpdateCommentResponse + path := "/api/2.0preview/mlflow/comments/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCommentResponse) + return &updateCommentResponse, err +} + +func (a *modelRegistryPreviewImpl) UpdateModel(ctx context.Context, request UpdateModelRequest) error { + var updateModelResponse UpdateModelResponse + path := "/api/2.0preview/mlflow/registered-models/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateModelResponse) + return err +} + +func (a *modelRegistryPreviewImpl) UpdateModelVersion(ctx context.Context, request UpdateModelVersionRequest) error { + var updateModelVersionResponse UpdateModelVersionResponse + path := "/api/2.0preview/mlflow/model-versions/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateModelVersionResponse) + return err +} + +func (a *modelRegistryPreviewImpl) UpdatePermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { + var registeredModelPermissions RegisteredModelPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &registeredModelPermissions) + return &registeredModelPermissions, err +} + +func (a *modelRegistryPreviewImpl) UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error { + var updateWebhookResponse UpdateWebhookResponse + path := "/api/2.0preview/mlflow/registry-webhooks/update" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateWebhookResponse) + return err +} diff --git a/ml/v2preview/model.go b/ml/v2preview/model.go new file mode 100755 index 000000000..6b79ff503 --- /dev/null +++ b/ml/v2preview/model.go @@ -0,0 +1,3074 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package mlpreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +// Activity recorded for the action. +type Activity struct { + // Type of activity. Valid values are: * `APPLIED_TRANSITION`: User applied + // the corresponding stage transition. + // + // * `REQUESTED_TRANSITION`: User requested the corresponding stage + // transition. + // + // * `CANCELLED_REQUEST`: User cancelled an existing transition request. + // + // * `APPROVED_REQUEST`: User approved the corresponding stage transition. 
+ // + // * `REJECTED_REQUEST`: User rejected the corresponding stage transition. + // + // * `SYSTEM_TRANSITION`: For events performed as a side effect, such as + // archiving existing model versions in a stage. + ActivityType ActivityType `json:"activity_type,omitempty"` + // User-provided comment associated with the activity. + Comment string `json:"comment,omitempty"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Source stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + FromStage Stage `json:"from_stage,omitempty"` + // Unique identifier for the object. + Id string `json:"id,omitempty"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Comment made by system, for example explaining an activity of type + // `SYSTEM_TRANSITION`. It usually describes a side effect, such as a + // version being archived as part of another version's stage transition, and + // may not be returned for some activity types. + SystemComment string `json:"system_comment,omitempty"` + // Target stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + ToStage Stage `json:"to_stage,omitempty"` + // The username of the user that created the object. + UserId string `json:"user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Activity) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Activity) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// An action that a user (with sufficient permissions) could take on an +// activity. Valid values are: * `APPROVE_TRANSITION_REQUEST`: Approve a +// transition request +// +// * `REJECT_TRANSITION_REQUEST`: Reject a transition request +// +// * `CANCEL_TRANSITION_REQUEST`: Cancel (delete) a transition request +type ActivityAction string + +// Approve a transition request +const ActivityActionApproveTransitionRequest ActivityAction = `APPROVE_TRANSITION_REQUEST` + +// Cancel (delete) a transition request +const ActivityActionCancelTransitionRequest ActivityAction = `CANCEL_TRANSITION_REQUEST` + +// Reject a transition request +const ActivityActionRejectTransitionRequest ActivityAction = `REJECT_TRANSITION_REQUEST` + +// String representation for [fmt.Print] +func (f *ActivityAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ActivityAction) Set(v string) error { + switch v { + case `APPROVE_TRANSITION_REQUEST`, `CANCEL_TRANSITION_REQUEST`, `REJECT_TRANSITION_REQUEST`: + *f = ActivityAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "APPROVE_TRANSITION_REQUEST", "CANCEL_TRANSITION_REQUEST", "REJECT_TRANSITION_REQUEST"`, v) + } +} + +// Type always returns ActivityAction to satisfy [pflag.Value] interface +func (f *ActivityAction) Type() string { + return "ActivityAction" +} + +// Type of activity. 
Valid values are: * `APPLIED_TRANSITION`: User applied the +// corresponding stage transition. +// +// * `REQUESTED_TRANSITION`: User requested the corresponding stage transition. +// +// * `CANCELLED_REQUEST`: User cancelled an existing transition request. +// +// * `APPROVED_REQUEST`: User approved the corresponding stage transition. +// +// * `REJECTED_REQUEST`: User rejected the corresponding stage transition. +// +// * `SYSTEM_TRANSITION`: For events performed as a side effect, such as +// archiving existing model versions in a stage. +type ActivityType string + +// User applied the corresponding stage transition. +const ActivityTypeAppliedTransition ActivityType = `APPLIED_TRANSITION` + +// User approved the corresponding stage transition. +const ActivityTypeApprovedRequest ActivityType = `APPROVED_REQUEST` + +// User cancelled an existing transition request. +const ActivityTypeCancelledRequest ActivityType = `CANCELLED_REQUEST` + +const ActivityTypeNewComment ActivityType = `NEW_COMMENT` + +// User rejected the corresponding stage transition. +const ActivityTypeRejectedRequest ActivityType = `REJECTED_REQUEST` + +// User requested the corresponding stage transition. +const ActivityTypeRequestedTransition ActivityType = `REQUESTED_TRANSITION` + +// For events performed as a side effect, such as archiving existing model +// versions in a stage. +const ActivityTypeSystemTransition ActivityType = `SYSTEM_TRANSITION` + +// String representation for [fmt.Print] +func (f *ActivityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ActivityType) Set(v string) error { + switch v { + case `APPLIED_TRANSITION`, `APPROVED_REQUEST`, `CANCELLED_REQUEST`, `NEW_COMMENT`, `REJECTED_REQUEST`, `REQUESTED_TRANSITION`, `SYSTEM_TRANSITION`: + *f = ActivityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "APPLIED_TRANSITION", "APPROVED_REQUEST", "CANCELLED_REQUEST", "NEW_COMMENT", "REJECTED_REQUEST", "REQUESTED_TRANSITION", "SYSTEM_TRANSITION"`, v) + } +} + +// Type always returns ActivityType to satisfy [pflag.Value] interface +func (f *ActivityType) Type() string { + return "ActivityType" +} + +type ApproveTransitionRequest struct { + // Specifies whether to archive all current model versions in the target + // stage. + ArchiveExistingVersions bool `json:"archive_existing_versions"` + // User-provided comment on the action. + Comment string `json:"comment,omitempty"` + // Name of the model. + Name string `json:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `json:"stage"` + // Version of the model. + Version string `json:"version"` + + ForceSendFields []string `json:"-"` +} + +func (s *ApproveTransitionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ApproveTransitionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ApproveTransitionRequestResponse struct { + // Activity recorded for the action. + Activity *Activity `json:"activity,omitempty"` +} + +// An action that a user (with sufficient permissions) could take on a comment. 
+// Valid values are: * `EDIT_COMMENT`: Edit the comment +// +// * `DELETE_COMMENT`: Delete the comment +type CommentActivityAction string + +// Delete the comment +const CommentActivityActionDeleteComment CommentActivityAction = `DELETE_COMMENT` + +// Edit the comment +const CommentActivityActionEditComment CommentActivityAction = `EDIT_COMMENT` + +// String representation for [fmt.Print] +func (f *CommentActivityAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CommentActivityAction) Set(v string) error { + switch v { + case `DELETE_COMMENT`, `EDIT_COMMENT`: + *f = CommentActivityAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETE_COMMENT", "EDIT_COMMENT"`, v) + } +} + +// Type always returns CommentActivityAction to satisfy [pflag.Value] interface +func (f *CommentActivityAction) Type() string { + return "CommentActivityAction" +} + +// Comment details. +type CommentObject struct { + // Array of actions on the activity allowed for the current viewer. + AvailableActions []CommentActivityAction `json:"available_actions,omitempty"` + // User-provided comment on the action. + Comment string `json:"comment,omitempty"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Comment ID + Id string `json:"id,omitempty"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // The username of the user that created the object. + UserId string `json:"user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CommentObject) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CommentObject) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateComment struct { + // User-provided comment on the action. + Comment string `json:"comment"` + // Name of the model. + Name string `json:"name"` + // Version of the model. + Version string `json:"version"` +} + +type CreateCommentResponse struct { + // Comment details. + Comment *CommentObject `json:"comment,omitempty"` +} + +type CreateExperiment struct { + // Location where all artifacts for the experiment are stored. If not + // provided, the remote server will select an appropriate default. + ArtifactLocation string `json:"artifact_location,omitempty"` + // Experiment name. + Name string `json:"name"` + // A collection of tags to set on the experiment. Maximum tag size and + // number of tags per request depends on the storage backend. All storage + // backends are guaranteed to support tag keys up to 250 bytes in size and + // tag values up to 5000 bytes in size. All storage backends are also + // guaranteed to support up to 20 tags per request. + Tags []ExperimentTag `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateExperiment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateExperiment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateExperimentResponse struct { + // Unique identifier for the experiment. 
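+ //
+ // A hedged usage sketch, not part of the generated API: assuming the
+ // Experiments client generated for this preview package keeps the shape of
+ // the stable ml package (w, ctx, and the error handling are illustrative),
+ // the new ID is read straight off this response:
+ //
+ //	resp, err := w.Experiments.CreateExperiment(ctx, CreateExperiment{
+ //		Name: "/Users/someone@example.com/my-experiment",
+ //	})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	fmt.Println(resp.ExperimentId)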
+ ExperimentId string `json:"experiment_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateExperimentResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateExperimentResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateModelRequest struct { + // Optional description for registered model. + Description string `json:"description,omitempty"` + // Register models under this name + Name string `json:"name"` + // Additional metadata for registered model. + Tags []ModelTag `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateModelRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateModelRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateModelResponse struct { + RegisteredModel *Model `json:"registered_model,omitempty"` +} + +type CreateModelVersionRequest struct { + // Optional description for model version. + Description string `json:"description,omitempty"` + // Register model under this name + Name string `json:"name"` + // MLflow run ID for correlation, if `source` was generated by an experiment + // run in MLflow tracking server + RunId string `json:"run_id,omitempty"` + // MLflow run link - this is the exact link of the run that generated this + // model version, potentially hosted at another instance of MLflow. + RunLink string `json:"run_link,omitempty"` + // URI indicating the location of the model artifacts. + Source string `json:"source"` + // Additional metadata for model version. + Tags []ModelVersionTag `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateModelVersionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateModelVersionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateModelVersionResponse struct { + // Return new version number generated for this model in registry. + ModelVersion *ModelVersion `json:"model_version,omitempty"` +} + +type CreateRegistryWebhook struct { + // User-specified description for the webhook. + Description string `json:"description,omitempty"` + // Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A + // new model version was created for the associated model. + // + // * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was + // changed. + // + // * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s + // stage be transitioned. + // + // * `COMMENT_CREATED`: A user wrote a comment on a registered model. + // + // * `REGISTERED_MODEL_CREATED`: A new registered model was created. This + // event type can only be specified for a registry-wide webhook, which can + // be created by not specifying a model name in the create request. + // + // * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. + // + // * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was + // transitioned to staging. + // + // * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was + // transitioned to production. + // + // * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. + // + // * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model + // version be transitioned to staging. + // + // * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model + // version be transitioned to production. 
+ //
+ // * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model
+ // version be archived.
+ Events []RegistryWebhookEvent `json:"events"`
+
+ HttpUrlSpec *HttpUrlSpec `json:"http_url_spec,omitempty"`
+
+ JobSpec *JobSpec `json:"job_spec,omitempty"`
+ // Name of the model whose events would trigger this webhook.
+ ModelName string `json:"model_name,omitempty"`
+ // Enable or disable triggering the webhook, or put the webhook into test
+ // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an
+ // associated event happens.
+ //
+ // * `DISABLED`: Webhook is not triggered.
+ //
+ // * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is
+ // not triggered on a real event.
+ Status RegistryWebhookStatus `json:"status,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateRegistryWebhook) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateRegistryWebhook) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateRun struct {
+ // ID of the associated experiment.
+ ExperimentId string `json:"experiment_id,omitempty"`
+ // Unix timestamp in milliseconds of when the run started.
+ StartTime int64 `json:"start_time,omitempty"`
+ // Additional metadata for run.
+ Tags []RunTag `json:"tags,omitempty"`
+ // ID of the user executing the run. This field is deprecated as of MLflow
+ // 1.0, and will be removed in a future MLflow release. Use 'mlflow.user'
+ // tag instead.
+ UserId string `json:"user_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateRun) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateRun) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateRunResponse struct {
+ // The newly created run.
+ Run *Run `json:"run,omitempty"`
+}
+
+type CreateTransitionRequest struct {
+ // User-provided comment on the action.
+ Comment string `json:"comment,omitempty"`
+ // Name of the model.
+ Name string `json:"name"`
+ // Target stage of the transition. Valid values are:
+ //
+ // * `None`: The initial stage of a model version.
+ //
+ // * `Staging`: Staging or pre-production stage.
+ //
+ // * `Production`: Production stage.
+ //
+ // * `Archived`: Archived stage.
+ Stage Stage `json:"stage"`
+ // Version of the model.
+ Version string `json:"version"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateTransitionRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s CreateTransitionRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type CreateTransitionRequestResponse struct {
+ // Transition request details.
+ Request *TransitionRequest `json:"request,omitempty"`
+}
+
+type CreateWebhookResponse struct {
+ Webhook *RegistryWebhook `json:"webhook,omitempty"`
+}
+
+type Dataset struct {
+ // Dataset digest, e.g. an MD5 hash of the dataset that uniquely identifies
+ // it within datasets of the same name.
+ Digest string `json:"digest,omitempty"`
+ // The name of the dataset. E.g. “my.uc.table@2”, “nyc-taxi-dataset”,
+ // “fantastic-elk-3”
+ Name string `json:"name,omitempty"`
+ // The profile of the dataset. Summary statistics for the dataset, such as
+ // the number of rows in a table, the mean / std / mode of each column in a
+ // table, or the number of elements in an array.
+ Profile string `json:"profile,omitempty"`
+ // The schema of the dataset.
+ // E.g., MLflow ColSpec JSON for a dataframe, MLflow TensorSpec JSON for an
+ // ndarray, or another schema format.
+ Schema string `json:"schema,omitempty"`
+ // Source information for the dataset. Note that the source may not exactly
+ // reproduce the dataset if it was transformed / modified before use with
+ // MLflow.
+ Source string `json:"source,omitempty"`
+ // The type of the dataset source, e.g. ‘databricks-uc-table’,
+ // ‘DBFS’, ‘S3’, ...
+ SourceType string `json:"source_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Dataset) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Dataset) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type DatasetInput struct {
+ // The dataset being used as a Run input.
+ Dataset *Dataset `json:"dataset,omitempty"`
+ // A list of tags for the dataset input, e.g. a “context” tag with value
+ // “training”.
+ Tags []InputTag `json:"tags,omitempty"`
+}
+
+// Delete a comment
+type DeleteCommentRequest struct {
+ Id string `json:"-" url:"id"`
+}
+
+type DeleteCommentResponse struct {
+}
+
+type DeleteExperiment struct {
+ // ID of the associated experiment.
+ ExperimentId string `json:"experiment_id"`
+}
+
+type DeleteExperimentResponse struct {
+}
+
+// Delete a model
+type DeleteModelRequest struct {
+ // Registered model unique name identifier.
+ Name string `json:"-" url:"name"`
+}
+
+type DeleteModelResponse struct {
+}
+
+// Delete a model tag
+type DeleteModelTagRequest struct {
+ // Name of the tag. The name must be an exact match; wild-card deletion is
+ // not supported. Maximum size is 250 bytes.
+ Key string `json:"-" url:"key"`
+ // Name of the registered model that the tag was logged under.
+ Name string `json:"-" url:"name"`
+}
+
+type DeleteModelTagResponse struct {
+}
+
+// Delete a model version.
+type DeleteModelVersionRequest struct {
+ // Name of the registered model
+ Name string `json:"-" url:"name"`
+ // Model version number
+ Version string `json:"-" url:"version"`
+}
+
+type DeleteModelVersionResponse struct {
+}
+
+// Delete a model version tag
+type DeleteModelVersionTagRequest struct {
+ // Name of the tag. The name must be an exact match; wild-card deletion is
+ // not supported. Maximum size is 250 bytes.
+ Key string `json:"-" url:"key"`
+ // Name of the registered model that the tag was logged under.
+ Name string `json:"-" url:"name"`
+ // Model version number that the tag was logged under.
+ Version string `json:"-" url:"version"`
+}
+
+type DeleteModelVersionTagResponse struct {
+}
+
+type DeleteRun struct {
+ // ID of the run to delete.
+ RunId string `json:"run_id"`
+}
+
+type DeleteRunResponse struct {
+}
+
+type DeleteRuns struct {
+ // The ID of the experiment containing the runs to delete.
+ ExperimentId string `json:"experiment_id"`
+ // An optional positive integer indicating the maximum number of runs to
+ // delete. The maximum allowed value for max_runs is 10000.
+ MaxRuns int `json:"max_runs,omitempty"`
+ // The maximum creation timestamp in milliseconds since the UNIX epoch for
+ // deleting runs. Only runs created prior to or at this timestamp are
+ // deleted.
+ MaxTimestampMillis int64 `json:"max_timestamp_millis"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DeleteRuns) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DeleteRuns) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type DeleteRunsResponse struct {
+ // The number of runs deleted.
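+ //
+ // A hedged sketch of the bulk-delete round trip defined just above (w and
+ // ctx are illustrative; MaxRuns is omitted, so the server-side default
+ // applies):
+ //
+ //	resp, err := w.Experiments.DeleteRuns(ctx, DeleteRuns{
+ //		ExperimentId:       "12345",
+ //		MaxTimestampMillis: time.Now().Add(-30 * 24 * time.Hour).UnixMilli(),
+ //	})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	fmt.Printf("deleted %d runs\n", resp.RunsDeleted)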
+ RunsDeleted int `json:"runs_deleted,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteRunsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteRunsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DeleteTag struct { + // Name of the tag. Maximum size is 255 bytes. Must be provided. + Key string `json:"key"` + // ID of the run that the tag was logged under. Must be provided. + RunId string `json:"run_id"` +} + +type DeleteTagResponse struct { +} + +// Delete a transition request +type DeleteTransitionRequestRequest struct { + // User-provided comment on the action. + Comment string `json:"-" url:"comment,omitempty"` + // Username of the user who created this request. Of the transition requests + // matching the specified details, only the one transition created by this + // user will be deleted. + Creator string `json:"-" url:"creator"` + // Name of the model. + Name string `json:"-" url:"name"` + // Target stage of the transition request. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage DeleteTransitionRequestStage `json:"-" url:"stage"` + // Version of the model. + Version string `json:"-" url:"version"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteTransitionRequestRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteTransitionRequestRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DeleteTransitionRequestResponse struct { +} + +type DeleteTransitionRequestStage string + +const DeleteTransitionRequestStageArchived DeleteTransitionRequestStage = `Archived` + +const DeleteTransitionRequestStageNone DeleteTransitionRequestStage = `None` + +const DeleteTransitionRequestStageProduction DeleteTransitionRequestStage = `Production` + +const DeleteTransitionRequestStageStaging DeleteTransitionRequestStage = `Staging` + +// String representation for [fmt.Print] +func (f *DeleteTransitionRequestStage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeleteTransitionRequestStage) Set(v string) error { + switch v { + case `Archived`, `None`, `Production`, `Staging`: + *f = DeleteTransitionRequestStage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Archived", "None", "Production", "Staging"`, v) + } +} + +// Type always returns DeleteTransitionRequestStage to satisfy [pflag.Value] interface +func (f *DeleteTransitionRequestStage) Type() string { + return "DeleteTransitionRequestStage" +} + +// Delete a webhook +type DeleteWebhookRequest struct { + // Webhook ID required to delete a registry webhook. + Id string `json:"-" url:"id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteWebhookRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteWebhookRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DeleteWebhookResponse struct { +} + +type Experiment struct { + // Location where artifacts for the experiment are stored. + ArtifactLocation string `json:"artifact_location,omitempty"` + // Creation time + CreationTime int64 `json:"creation_time,omitempty"` + // Unique identifier for the experiment. 
+ ExperimentId string `json:"experiment_id,omitempty"` + // Last update time + LastUpdateTime int64 `json:"last_update_time,omitempty"` + // Current life cycle stage of the experiment: "active" or "deleted". + // Deleted experiments are not returned by APIs. + LifecycleStage string `json:"lifecycle_stage,omitempty"` + // Human readable name that identifies the experiment. + Name string `json:"name,omitempty"` + // Tags: Additional metadata key-value pairs. + Tags []ExperimentTag `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Experiment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Experiment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExperimentAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel ExperimentPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExperimentAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExperimentAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExperimentAccessControlResponse struct { + // All permissions. + AllPermissions []ExperimentPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExperimentAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExperimentAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExperimentPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel ExperimentPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExperimentPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExperimentPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type ExperimentPermissionLevel string + +const ExperimentPermissionLevelCanEdit ExperimentPermissionLevel = `CAN_EDIT` + +const ExperimentPermissionLevelCanManage ExperimentPermissionLevel = `CAN_MANAGE` + +const ExperimentPermissionLevelCanRead ExperimentPermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *ExperimentPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExperimentPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`: + *f = ExperimentPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ"`, v) + } +} + +// Type always returns ExperimentPermissionLevel to satisfy [pflag.Value] interface +func (f *ExperimentPermissionLevel) 
Type() string {
+ return "ExperimentPermissionLevel"
+}
+
+type ExperimentPermissions struct {
+ AccessControlList []ExperimentAccessControlResponse `json:"access_control_list,omitempty"`
+
+ ObjectId string `json:"object_id,omitempty"`
+
+ ObjectType string `json:"object_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ExperimentPermissions) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ExperimentPermissions) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ExperimentPermissionsDescription struct {
+ Description string `json:"description,omitempty"`
+ // Permission level
+ PermissionLevel ExperimentPermissionLevel `json:"permission_level,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ExperimentPermissionsDescription) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ExperimentPermissionsDescription) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ExperimentPermissionsRequest struct {
+ AccessControlList []ExperimentAccessControlRequest `json:"access_control_list,omitempty"`
+ // The experiment for which to get or manage permissions.
+ ExperimentId string `json:"-" url:"-"`
+}
+
+type ExperimentTag struct {
+ // The tag key.
+ Key string `json:"key,omitempty"`
+ // The tag value.
+ Value string `json:"value,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ExperimentTag) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ExperimentTag) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type FileInfo struct {
+ // Size in bytes. Unset for directories.
+ FileSize int64 `json:"file_size,omitempty"`
+ // Whether the path is a directory.
+ IsDir bool `json:"is_dir,omitempty"`
+ // Path relative to the run's root artifact directory.
+ Path string `json:"path,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *FileInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s FileInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get metadata
+type GetByNameRequest struct {
+ // Name of the associated experiment.
+ ExperimentName string `json:"-" url:"experiment_name"`
+}
+
+// Get experiment permission levels
+type GetExperimentPermissionLevelsRequest struct {
+ // The experiment for which to get or manage permissions.
+ ExperimentId string `json:"-" url:"-"`
+}
+
+type GetExperimentPermissionLevelsResponse struct {
+ // Specific permission levels
+ PermissionLevels []ExperimentPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get experiment permissions
+type GetExperimentPermissionsRequest struct {
+ // The experiment for which to get or manage permissions.
+ ExperimentId string `json:"-" url:"-"`
+}
+
+// Get an experiment
+type GetExperimentRequest struct {
+ // ID of the associated experiment.
+ ExperimentId string `json:"-" url:"experiment_id"`
+}
+
+type GetExperimentResponse struct {
+ // Experiment details.
+ Experiment *Experiment `json:"experiment,omitempty"`
+}
+
+// Get history of a given metric within a run
+type GetHistoryRequest struct {
+ // Maximum number of Metric records to return per paginated request. Default
+ // is set to 25,000. If set higher than 25,000, an exception will be
+ // raised.
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // Name of the metric.
+ MetricKey string `json:"-" url:"metric_key"`
+ // Token indicating the page of metric histories to fetch.
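+ //
+ // Pagination chains requests through this token; a hedged sketch of
+ // draining a metric's history (w, ctx, and metrics are illustrative, and
+ // the method name assumes the stable ml package's Experiments client shape):
+ //
+ //	req := GetHistoryRequest{RunId: runId, MetricKey: "loss"}
+ //	for {
+ //		resp, err := w.Experiments.GetHistory(ctx, req)
+ //		if err != nil {
+ //			return err
+ //		}
+ //		metrics = append(metrics, resp.Metrics...)
+ //		if resp.NextPageToken == "" {
+ //			break
+ //		}
+ //		req.PageToken = resp.NextPageToken
+ //	}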
+ PageToken string `json:"-" url:"page_token,omitempty"`
+ // ID of the run from which to fetch metric values. Must be provided.
+ RunId string `json:"-" url:"run_id,omitempty"`
+ // [Deprecated, use run_id instead] ID of the run from which to fetch metric
+ // values. This field will be removed in a future MLflow version.
+ RunUuid string `json:"-" url:"run_uuid,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetHistoryRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetHistoryRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type GetLatestVersionsRequest struct {
+ // Registered model unique name identifier.
+ Name string `json:"name"`
+ // List of stages.
+ Stages []string `json:"stages,omitempty"`
+}
+
+type GetLatestVersionsResponse struct {
+ // Latest version models for each requested stage. Only return models with
+ // current `READY` status. If no `stages` are provided, returns the latest
+ // version for each stage, including `"None"`.
+ ModelVersions []ModelVersion `json:"model_versions,omitempty"`
+}
+
+type GetMetricHistoryResponse struct {
+ // All logged values for this metric.
+ Metrics []Metric `json:"metrics,omitempty"`
+ // Token that can be used to retrieve the next page of metric history
+ // results
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetMetricHistoryResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetMetricHistoryResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get model
+type GetModelRequest struct {
+ // Registered model unique name identifier.
+ Name string `json:"-" url:"name"`
+}
+
+type GetModelResponse struct {
+ RegisteredModelDatabricks *ModelDatabricks `json:"registered_model_databricks,omitempty"`
+}
+
+// Get a model version URI
+type GetModelVersionDownloadUriRequest struct {
+ // Name of the registered model
+ Name string `json:"-" url:"name"`
+ // Model version number
+ Version string `json:"-" url:"version"`
+}
+
+type GetModelVersionDownloadUriResponse struct {
+ // URI corresponding to where artifacts for this model version are stored.
+ ArtifactUri string `json:"artifact_uri,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetModelVersionDownloadUriResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetModelVersionDownloadUriResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a model version
+type GetModelVersionRequest struct {
+ // Name of the registered model
+ Name string `json:"-" url:"name"`
+ // Model version number
+ Version string `json:"-" url:"version"`
+}
+
+type GetModelVersionResponse struct {
+ ModelVersion *ModelVersion `json:"model_version,omitempty"`
+}
+
+// Get registered model permission levels
+type GetRegisteredModelPermissionLevelsRequest struct {
+ // The registered model for which to get or manage permissions.
+ RegisteredModelId string `json:"-" url:"-"`
+}
+
+type GetRegisteredModelPermissionLevelsResponse struct {
+ // Specific permission levels
+ PermissionLevels []RegisteredModelPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get registered model permissions
+type GetRegisteredModelPermissionsRequest struct {
+ // The registered model for which to get or manage permissions.
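+ //
+ // A hedged sketch of listing the permission levels available on a
+ // registered model (w and ctx are illustrative; the method name assumes the
+ // stable ml package's ModelRegistry client shape):
+ //
+ //	levels, err := w.ModelRegistry.GetPermissionLevels(ctx,
+ //		GetRegisteredModelPermissionLevelsRequest{RegisteredModelId: id})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	for _, pl := range levels.PermissionLevels {
+ //		fmt.Println(pl.PermissionLevel, pl.Description)
+ //	}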
+ RegisteredModelId string `json:"-" url:"-"`
+}
+
+// Get a run
+type GetRunRequest struct {
+ // ID of the run to fetch. Must be provided.
+ RunId string `json:"-" url:"run_id"`
+ // [Deprecated, use run_id instead] ID of the run to fetch. This field will
+ // be removed in a future MLflow version.
+ RunUuid string `json:"-" url:"run_uuid,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetRunRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetRunRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type GetRunResponse struct {
+ // Run metadata (name, start time, etc) and data (metrics, params, and
+ // tags).
+ Run *Run `json:"run,omitempty"`
+}
+
+type HttpUrlSpec struct {
+ // Value of the authorization header that should be sent in the request sent
+ // by the webhook. It should be of the form `"<auth type> <credentials>"`.
+ // If set to an empty string, no authorization header will be included in
+ // the request.
+ Authorization string `json:"authorization,omitempty"`
+ // Enable/disable SSL certificate validation. Default is true. For
+ // self-signed certificates, this field must be false AND the destination
+ // server must disable certificate validation as well. For security
+ // purposes, it is encouraged to perform secret validation with the
+ // HMAC-encoded portion of the payload and acknowledge the risk associated
+ // with disabling hostname validation whereby it becomes more likely that
+ // requests can be maliciously routed to an unintended host.
+ EnableSslVerification bool `json:"enable_ssl_verification,omitempty"`
+ // Shared secret required for HMAC encoding payload. The HMAC-encoded
+ // payload will be sent in the header as: { "X-Databricks-Signature":
+ // $encoded_payload }.
+ Secret string `json:"secret,omitempty"`
+ // External HTTPS URL called on event trigger (by using a POST request).
+ Url string `json:"url"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpUrlSpec) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s HttpUrlSpec) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type HttpUrlSpecWithoutSecret struct {
+ // Enable/disable SSL certificate validation. Default is true. For
+ // self-signed certificates, this field must be false AND the destination
+ // server must disable certificate validation as well. For security
+ // purposes, it is encouraged to perform secret validation with the
+ // HMAC-encoded portion of the payload and acknowledge the risk associated
+ // with disabling hostname validation whereby it becomes more likely that
+ // requests can be maliciously routed to an unintended host.
+ EnableSslVerification bool `json:"enable_ssl_verification,omitempty"`
+ // External HTTPS URL called on event trigger (by using a POST request).
+ Url string `json:"url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *HttpUrlSpecWithoutSecret) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s HttpUrlSpecWithoutSecret) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type InputTag struct {
+ // The tag key.
+ Key string `json:"key,omitempty"`
+ // The tag value.
+ Value string `json:"value,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *InputTag) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s InputTag) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type JobSpec struct {
+ // The personal access token used to authorize the webhook's job runs.
+ AccessToken string `json:"access_token"`
+ // ID of the job that the webhook runs.
+ JobId string `json:"job_id"`
+ // URL of the workspace containing the job that this webhook runs. If not
+ // specified, the job’s workspace URL is assumed to be the same as the
+ // workspace where the webhook is created.
+ WorkspaceUrl string `json:"workspace_url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *JobSpec) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s JobSpec) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type JobSpecWithoutSecret struct {
+ // ID of the job that the webhook runs.
+ JobId string `json:"job_id,omitempty"`
+ // URL of the workspace containing the job that this webhook runs. If not
+ // specified, the job’s workspace URL is assumed to be the same as the
+ // workspace where the webhook is created.
+ WorkspaceUrl string `json:"workspace_url,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *JobSpecWithoutSecret) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get all artifacts
+type ListArtifactsRequest struct {
+ // Token indicating the page of artifact results to fetch. `page_token` is
+ // not supported when listing artifacts in UC Volumes. A maximum of 1000
+ // artifacts will be retrieved for UC Volumes. Please call
+ // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC
+ // Volumes, which supports pagination. See [List directory contents | Files
+ // API](/api/workspace/files/listdirectorycontents).
+ PageToken string `json:"-" url:"page_token,omitempty"`
+ // Filter artifacts matching this path (a relative path from the root
+ // artifact directory).
+ Path string `json:"-" url:"path,omitempty"`
+ // ID of the run whose artifacts to list. Must be provided.
+ RunId string `json:"-" url:"run_id,omitempty"`
+ // [Deprecated, use run_id instead] ID of the run whose artifacts to list.
+ // This field will be removed in a future MLflow version.
+ RunUuid string `json:"-" url:"run_uuid,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListArtifactsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListArtifactsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListArtifactsResponse struct {
+ // File location and metadata for artifacts.
+ Files []FileInfo `json:"files,omitempty"`
+ // Token that can be used to retrieve the next page of artifact results
+ NextPageToken string `json:"next_page_token,omitempty"`
+ // Root artifact directory for the run.
+ RootUri string `json:"root_uri,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListArtifactsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListArtifactsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List experiments
+type ListExperimentsRequest struct {
+ // Maximum number of experiments desired. If `max_results` is unspecified,
+ // return all experiments. If `max_results` is too large, it'll be
+ // automatically capped at 1000. Callers of this endpoint are encouraged to
+ // pass max_results explicitly and leverage page_token to iterate through
+ // experiments.
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // Token indicating the page of experiments to fetch
+ PageToken string `json:"-" url:"page_token,omitempty"`
+ // Qualifier for type of experiments to be returned. If unspecified, return
+ // only active experiments.
+ ViewType string `json:"-" url:"view_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListExperimentsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListExperimentsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListExperimentsResponse struct {
+ // Paginated Experiments beginning with the first item on the requested
+ // page.
+ Experiments []Experiment `json:"experiments,omitempty"`
+ // Token that can be used to retrieve the next page of experiments. An empty
+ // token means no more experiments are available for retrieval.
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListExperimentsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListExperimentsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List models
+type ListModelsRequest struct {
+ // Maximum number of registered models desired. Max threshold is 1000.
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // Pagination token to go to the next page based on a previous query.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListModelsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListModelsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListModelsResponse struct {
+ // Pagination token to request next page of models for the same query.
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ RegisteredModels []Model `json:"registered_models,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListModelsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListModelsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListRegistryWebhooks struct {
+ // Token that can be used to retrieve the next page of registry webhook
+ // results
+ NextPageToken string `json:"next_page_token,omitempty"`
+ // Array of registry webhooks.
+ Webhooks []RegistryWebhook `json:"webhooks,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListRegistryWebhooks) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListRegistryWebhooks) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List transition requests
+type ListTransitionRequestsRequest struct {
+ // Name of the model.
+ Name string `json:"-" url:"name"`
+ // Version of the model.
+ Version string `json:"-" url:"version"`
+}
+
+type ListTransitionRequestsResponse struct {
+ // Array of open transition requests.
+ Requests []Activity `json:"requests,omitempty"`
+}
+
+// List registry webhooks
+type ListWebhooksRequest struct {
+ // If `events` is specified, any webhook with one or more of the specified
+ // trigger events is included in the output. If `events` is not specified,
+ // webhooks of all event types are included in the output.
+ Events []RegistryWebhookEvent `json:"-" url:"events,omitempty"`
+ // If not specified, all webhooks associated with the specified events are
+ // listed, regardless of their associated model.
+ ModelName string `json:"-" url:"model_name,omitempty"`
+ // Token indicating the page of registry webhook results to fetch
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListWebhooksRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListWebhooksRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogBatch struct {
+ // Metrics to log. A single request can contain up to 1000 metrics, and up
+ // to 1000 metrics, params, and tags in total.
+ Metrics []Metric `json:"metrics,omitempty"`
+ // Params to log. A single request can contain up to 100 params, and up to
+ // 1000 metrics, params, and tags in total.
+ Params []Param `json:"params,omitempty"`
+ // ID of the run to log under
+ RunId string `json:"run_id,omitempty"`
+ // Tags to log. A single request can contain up to 100 tags, and up to 1000
+ // metrics, params, and tags in total.
+ Tags []RunTag `json:"tags,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogBatch) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogBatch) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogBatchResponse struct {
+}
+
+type LogInputs struct {
+ // Dataset inputs
+ Datasets []DatasetInput `json:"datasets,omitempty"`
+ // ID of the run to log under
+ RunId string `json:"run_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogInputs) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogInputs) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogInputsResponse struct {
+}
+
+type LogMetric struct {
+ // Name of the metric.
+ Key string `json:"key"`
+ // ID of the run under which to log the metric. Must be provided.
+ RunId string `json:"run_id,omitempty"`
+ // [Deprecated, use run_id instead] ID of the run under which to log the
+ // metric. This field will be removed in a future MLflow version.
+ RunUuid string `json:"run_uuid,omitempty"`
+ // Step at which to log the metric
+ Step int64 `json:"step,omitempty"`
+ // Unix timestamp in milliseconds at the time metric was logged.
+ Timestamp int64 `json:"timestamp"`
+ // Double value of the metric being logged.
+ Value float64 `json:"value"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogMetric) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogMetric) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogMetricResponse struct {
+}
+
+type LogModel struct {
+ // MLmodel file in JSON format.
+ ModelJson string `json:"model_json,omitempty"`
+ // ID of the run to log under
+ RunId string `json:"run_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LogModel) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LogModel) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LogModelResponse struct {
+}
+
+type LogParam struct {
+ // Name of the param. Maximum size is 255 bytes.
+ Key string `json:"key"`
+ // ID of the run under which to log the param. Must be provided.
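+ //
+ // For context on the LogBatch limits documented above (at most 1000
+ // metrics, 100 params, and 100 tags per request, and 1000 entities in
+ // total), a hedged chunking sketch (w, ctx, runId, and metrics are
+ // illustrative; this assumes empty-response methods return only an error,
+ // as in the stable SDK, and the min builtin requires Go 1.21+):
+ //
+ //	for i := 0; i < len(metrics); i += 1000 {
+ //		end := min(i+1000, len(metrics))
+ //		if err := w.Experiments.LogBatch(ctx, LogBatch{
+ //			RunId:   runId,
+ //			Metrics: metrics[i:end],
+ //		}); err != nil {
+ //			return err
+ //		}
+ //	}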
+ RunId string `json:"run_id,omitempty"` + // [Deprecated, use run_id instead] ID of the run under which to log the + // param. This field will be removed in a future MLflow version. + RunUuid string `json:"run_uuid,omitempty"` + // String value of the param being logged. Maximum size is 500 bytes. + Value string `json:"value"` + + ForceSendFields []string `json:"-"` +} + +func (s *LogParam) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LogParam) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type LogParamResponse struct { +} + +type Metric struct { + // Key identifying this metric. + Key string `json:"key,omitempty"` + // Step at which to log the metric. + Step int64 `json:"step,omitempty"` + // The timestamp at which this metric was recorded. + Timestamp int64 `json:"timestamp,omitempty"` + // Value associated with this metric. + Value float64 `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Metric) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Metric) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Model struct { + // Timestamp recorded when this `registered_model` was created. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Description of this `registered_model`. + Description string `json:"description,omitempty"` + // Timestamp recorded when metadata for this `registered_model` was last + // updated. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Collection of latest model versions for each stage. Only contains models + // with current `READY` status. + LatestVersions []ModelVersion `json:"latest_versions,omitempty"` + // Unique name for the model. + Name string `json:"name,omitempty"` + // Tags: Additional metadata key-value pairs for this `registered_model`. + Tags []ModelTag `json:"tags,omitempty"` + // User that created this `registered_model` + UserId string `json:"user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Model) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Model) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ModelDatabricks struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // User-specified description for the object. + Description string `json:"description,omitempty"` + // Unique identifier for the object. + Id string `json:"id,omitempty"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Array of model versions, each the latest version for its stage. + LatestVersions []ModelVersion `json:"latest_versions,omitempty"` + // Name of the model. + Name string `json:"name,omitempty"` + // Permission level of the requesting user on the object. For what is + // allowed at each level, see [MLflow Model permissions](..). + PermissionLevel PermissionLevel `json:"permission_level,omitempty"` + // Array of tags associated with the model. + Tags []ModelTag `json:"tags,omitempty"` + // The username of the user that created the object. 
+ UserId string `json:"user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelDatabricks) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelDatabricks) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ModelTag struct { + // The tag key. + Key string `json:"key,omitempty"` + // The tag value. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ModelVersion struct { + // Timestamp recorded when this `model_version` was created. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Current stage for this `model_version`. + CurrentStage string `json:"current_stage,omitempty"` + // Description of this `model_version`. + Description string `json:"description,omitempty"` + // Timestamp recorded when metadata for this `model_version` was last + // updated. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Unique name of the model + Name string `json:"name,omitempty"` + // MLflow run ID used when creating `model_version`, if `source` was + // generated by an experiment run stored in MLflow tracking server. + RunId string `json:"run_id,omitempty"` + // Run Link: Direct link to the run that generated this version + RunLink string `json:"run_link,omitempty"` + // URI indicating the location of the source model artifacts, used when + // creating `model_version` + Source string `json:"source,omitempty"` + // Current status of `model_version` + Status ModelVersionStatus `json:"status,omitempty"` + // Details on current `status`, if it is pending or failed. + StatusMessage string `json:"status_message,omitempty"` + // Tags: Additional metadata key-value pairs for this `model_version`. + Tags []ModelVersionTag `json:"tags,omitempty"` + // User that created this `model_version`. + UserId string `json:"user_id,omitempty"` + // Model's version number. + Version string `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelVersion) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelVersion) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ModelVersionDatabricks struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Stage of the model version. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + CurrentStage Stage `json:"current_stage,omitempty"` + // User-specified description for the object. + Description string `json:"description,omitempty"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Name of the model. + Name string `json:"name,omitempty"` + // Permission level of the requesting user on the object. For what is + // allowed at each level, see [MLflow Model permissions](..). + PermissionLevel PermissionLevel `json:"permission_level,omitempty"` + // Unique identifier for the MLflow tracking run associated with the source + // model artifacts. 
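+ //
+ // The stages above tie into the transition-request flow earlier in this
+ // file; a hedged sketch of approving a pending request into Production (w
+ // and ctx are illustrative, and StageProduction is assumed to be among the
+ // Stage constants defined elsewhere in this package):
+ //
+ //	resp, err := w.ModelRegistry.ApproveTransitionRequest(ctx, ApproveTransitionRequest{
+ //		Name:                    "my-model",
+ //		Version:                 "3",
+ //		Stage:                   StageProduction,
+ //		ArchiveExistingVersions: true,
+ //	})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	fmt.Println(resp.Activity.Id)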
+ RunId string `json:"run_id,omitempty"` + // URL of the run associated with the model artifacts. This field is set at + // model version creation time only for model versions whose source run is + // from a tracking server that is different from the registry server. + RunLink string `json:"run_link,omitempty"` + // URI that indicates the location of the source model artifacts. This is + // used when creating the model version. + Source string `json:"source,omitempty"` + // The status of the model version. Valid values are: * + // `PENDING_REGISTRATION`: Request to register a new model version is + // pending as server performs background tasks. + // + // * `FAILED_REGISTRATION`: Request to register a new model version has + // failed. + // + // * `READY`: Model version is ready for use. + Status Status `json:"status,omitempty"` + // Details on the current status, for example why registration failed. + StatusMessage string `json:"status_message,omitempty"` + // Array of tags that are associated with the model version. + Tags []ModelVersionTag `json:"tags,omitempty"` + // The username of the user that created the object. + UserId string `json:"user_id,omitempty"` + // Version of the model. + Version string `json:"version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelVersionDatabricks) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelVersionDatabricks) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Current status of `model_version` +type ModelVersionStatus string + +const ModelVersionStatusFailedRegistration ModelVersionStatus = `FAILED_REGISTRATION` + +const ModelVersionStatusPendingRegistration ModelVersionStatus = `PENDING_REGISTRATION` + +const ModelVersionStatusReady ModelVersionStatus = `READY` + +// String representation for [fmt.Print] +func (f *ModelVersionStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ModelVersionStatus) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = ModelVersionStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns ModelVersionStatus to satisfy [pflag.Value] interface +func (f *ModelVersionStatus) Type() string { + return "ModelVersionStatus" +} + +type ModelVersionTag struct { + // The tag key. + Key string `json:"key,omitempty"` + // The tag value. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ModelVersionTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ModelVersionTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Param struct { + // Key identifying this param. + Key string `json:"key,omitempty"` + // Value associated with this param. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Param) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Param) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level of the requesting user on the object. For what is allowed at +// each level, see [MLflow Model permissions](..). 
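+//
+// Like the other enum types in this file, PermissionLevel implements
+// [pflag.Value] through its String, Set, and Type methods, so raw flag input
+// can be validated before use; a minimal sketch:
+//
+//	var level PermissionLevel
+//	if err := level.Set("CAN_MANAGE"); err != nil {
+//		log.Fatal(err) // values outside the allowed set are rejected
+//	}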
+type PermissionLevel string + +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +const PermissionLevelCanManageProductionVersions PermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const PermissionLevelCanManageStagingVersions PermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const PermissionLevelCanRead PermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_READ`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_STAGING_VERSIONS", "CAN_READ"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +type RegisteredModelAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegisteredModelAccessControlResponse struct { + // All permissions. + AllPermissions []RegisteredModelPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. 
+ ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegisteredModelPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type RegisteredModelPermissionLevel string + +const RegisteredModelPermissionLevelCanEdit RegisteredModelPermissionLevel = `CAN_EDIT` + +const RegisteredModelPermissionLevelCanManage RegisteredModelPermissionLevel = `CAN_MANAGE` + +const RegisteredModelPermissionLevelCanManageProductionVersions RegisteredModelPermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const RegisteredModelPermissionLevelCanManageStagingVersions RegisteredModelPermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const RegisteredModelPermissionLevelCanRead RegisteredModelPermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *RegisteredModelPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegisteredModelPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_READ`: + *f = RegisteredModelPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_STAGING_VERSIONS", "CAN_READ"`, v) + } +} + +// Type always returns RegisteredModelPermissionLevel to satisfy [pflag.Value] interface +func (f *RegisteredModelPermissionLevel) Type() string { + return "RegisteredModelPermissionLevel" +} + +type RegisteredModelPermissions struct { + AccessControlList []RegisteredModelAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegisteredModelPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegisteredModelPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegisteredModelPermissionsRequest struct { + AccessControlList []RegisteredModelAccessControlRequest `json:"access_control_list,omitempty"` + // The registered model for which to get or manage permissions. 
+ RegisteredModelId string `json:"-" url:"-"` +} + +type RegistryWebhook struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // User-specified description for the webhook. + Description string `json:"description,omitempty"` + // Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A + // new model version was created for the associated model. + // + // * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was + // changed. + // + // * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s + // stage be transitioned. + // + // * `COMMENT_CREATED`: A user wrote a comment on a registered model. + // + // * `REGISTERED_MODEL_CREATED`: A new registered model was created. This + // event type can only be specified for a registry-wide webhook, which can + // be created by not specifying a model name in the create request. + // + // * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. + // + // * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was + // transitioned to staging. + // + // * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was + // transitioned to production. + // + // * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. + // + // * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model + // version be transitioned to staging. + // + // * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model + // version be transitioned to production. + // + // * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model + // version be archived. + Events []RegistryWebhookEvent `json:"events,omitempty"` + + HttpUrlSpec *HttpUrlSpecWithoutSecret `json:"http_url_spec,omitempty"` + // Webhook ID + Id string `json:"id,omitempty"` + + JobSpec *JobSpecWithoutSecret `json:"job_spec,omitempty"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // Name of the model whose events would trigger this webhook. + ModelName string `json:"model_name,omitempty"` + // Enable or disable triggering the webhook, or put the webhook into test + // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an + // associated event happens. + // + // * `DISABLED`: Webhook is not triggered. + // + // * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is + // not triggered on a real event. 
+ Status RegistryWebhookStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RegistryWebhook) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegistryWebhook) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RegistryWebhookEvent string + +const RegistryWebhookEventCommentCreated RegistryWebhookEvent = `COMMENT_CREATED` + +const RegistryWebhookEventModelVersionCreated RegistryWebhookEvent = `MODEL_VERSION_CREATED` + +const RegistryWebhookEventModelVersionTagSet RegistryWebhookEvent = `MODEL_VERSION_TAG_SET` + +const RegistryWebhookEventModelVersionTransitionedStage RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_STAGE` + +const RegistryWebhookEventModelVersionTransitionedToArchived RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED` + +const RegistryWebhookEventModelVersionTransitionedToProduction RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION` + +const RegistryWebhookEventModelVersionTransitionedToStaging RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_STAGING` + +const RegistryWebhookEventRegisteredModelCreated RegistryWebhookEvent = `REGISTERED_MODEL_CREATED` + +const RegistryWebhookEventTransitionRequestCreated RegistryWebhookEvent = `TRANSITION_REQUEST_CREATED` + +const RegistryWebhookEventTransitionRequestToArchivedCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_ARCHIVED_CREATED` + +const RegistryWebhookEventTransitionRequestToProductionCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_PRODUCTION_CREATED` + +const RegistryWebhookEventTransitionRequestToStagingCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_STAGING_CREATED` + +// String representation for [fmt.Print] +func (f *RegistryWebhookEvent) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegistryWebhookEvent) Set(v string) error { + switch v { + case `COMMENT_CREATED`, `MODEL_VERSION_CREATED`, `MODEL_VERSION_TAG_SET`, `MODEL_VERSION_TRANSITIONED_STAGE`, `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`, `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`, `MODEL_VERSION_TRANSITIONED_TO_STAGING`, `REGISTERED_MODEL_CREATED`, `TRANSITION_REQUEST_CREATED`, `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`, `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`, `TRANSITION_REQUEST_TO_STAGING_CREATED`: + *f = RegistryWebhookEvent(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COMMENT_CREATED", "MODEL_VERSION_CREATED", "MODEL_VERSION_TAG_SET", "MODEL_VERSION_TRANSITIONED_STAGE", "MODEL_VERSION_TRANSITIONED_TO_ARCHIVED", "MODEL_VERSION_TRANSITIONED_TO_PRODUCTION", "MODEL_VERSION_TRANSITIONED_TO_STAGING", "REGISTERED_MODEL_CREATED", "TRANSITION_REQUEST_CREATED", "TRANSITION_REQUEST_TO_ARCHIVED_CREATED", "TRANSITION_REQUEST_TO_PRODUCTION_CREATED", "TRANSITION_REQUEST_TO_STAGING_CREATED"`, v) + } +} + +// Type always returns RegistryWebhookEvent to satisfy [pflag.Value] interface +func (f *RegistryWebhookEvent) Type() string { + return "RegistryWebhookEvent" +} + +// Enable or disable triggering the webhook, or put the webhook into test mode. +// The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated +// event happens. +// +// * `DISABLED`: Webhook is not triggered. +// +// * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is not +// triggered on a real event. +type RegistryWebhookStatus string + +// Webhook is triggered when an associated event happens. 
+const RegistryWebhookStatusActive RegistryWebhookStatus = `ACTIVE` + +// Webhook is not triggered. +const RegistryWebhookStatusDisabled RegistryWebhookStatus = `DISABLED` + +// Webhook can be triggered through the test endpoint, but is not triggered on a +// real event. +const RegistryWebhookStatusTestMode RegistryWebhookStatus = `TEST_MODE` + +// String representation for [fmt.Print] +func (f *RegistryWebhookStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegistryWebhookStatus) Set(v string) error { + switch v { + case `ACTIVE`, `DISABLED`, `TEST_MODE`: + *f = RegistryWebhookStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DISABLED", "TEST_MODE"`, v) + } +} + +// Type always returns RegistryWebhookStatus to satisfy [pflag.Value] interface +func (f *RegistryWebhookStatus) Type() string { + return "RegistryWebhookStatus" +} + +type RejectTransitionRequest struct { + // User-provided comment on the action. + Comment string `json:"comment,omitempty"` + // Name of the model. + Name string `json:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `json:"stage"` + // Version of the model. + Version string `json:"version"` + + ForceSendFields []string `json:"-"` +} + +func (s *RejectTransitionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RejectTransitionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RejectTransitionRequestResponse struct { + // Activity recorded for the action. + Activity *Activity `json:"activity,omitempty"` +} + +type RenameModelRequest struct { + // Registered model unique name identifier. + Name string `json:"name"` + // If provided, updates the name for this `registered_model`. + NewName string `json:"new_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RenameModelRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RenameModelRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RenameModelResponse struct { + RegisteredModel *Model `json:"registered_model,omitempty"` +} + +type RestoreExperiment struct { + // ID of the associated experiment. + ExperimentId string `json:"experiment_id"` +} + +type RestoreExperimentResponse struct { +} + +type RestoreRun struct { + // ID of the run to restore. + RunId string `json:"run_id"` +} + +type RestoreRunResponse struct { +} + +type RestoreRuns struct { + // The ID of the experiment containing the runs to restore. + ExperimentId string `json:"experiment_id"` + // An optional positive integer indicating the maximum number of runs to + // restore. The maximum allowed value for max_runs is 10000. + MaxRuns int `json:"max_runs,omitempty"` + // The minimum deletion timestamp in milliseconds since the UNIX epoch for + // restoring runs. Only runs deleted no earlier than this timestamp are + // restored. + MinTimestampMillis int64 `json:"min_timestamp_millis"` + + ForceSendFields []string `json:"-"` +} + +func (s *RestoreRuns) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RestoreRuns) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RestoreRunsResponse struct { + // The number of runs restored. 
+	RunsRestored int `json:"runs_restored,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *RestoreRunsResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s RestoreRunsResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type Run struct {
+	// Run data.
+	Data *RunData `json:"data,omitempty"`
+	// Run metadata.
+	Info *RunInfo `json:"info,omitempty"`
+	// Run inputs.
+	Inputs *RunInputs `json:"inputs,omitempty"`
+}
+
+type RunData struct {
+	// Run metrics.
+	Metrics []Metric `json:"metrics,omitempty"`
+	// Run parameters.
+	Params []Param `json:"params,omitempty"`
+	// Additional metadata key-value pairs.
+	Tags []RunTag `json:"tags,omitempty"`
+}
+
+type RunInfo struct {
+	// URI of the directory where artifacts should be uploaded. This can be a
+	// local path (starting with "/"), or a distributed file system (DFS) path,
+	// like `s3://bucket/directory` or `dbfs:/my/directory`. If not set, the
+	// local `./mlruns` directory is chosen.
+	ArtifactUri string `json:"artifact_uri,omitempty"`
+	// Unix timestamp of when the run ended in milliseconds.
+	EndTime int64 `json:"end_time,omitempty"`
+	// The experiment ID.
+	ExperimentId string `json:"experiment_id,omitempty"`
+	// Current lifecycle stage of the run: OneOf("active", "deleted")
+	LifecycleStage string `json:"lifecycle_stage,omitempty"`
+	// Unique identifier for the run.
+	RunId string `json:"run_id,omitempty"`
+	// [Deprecated, use run_id instead] Unique identifier for the run. This
+	// field will be removed in a future MLflow version.
+	RunUuid string `json:"run_uuid,omitempty"`
+	// Unix timestamp of when the run started in milliseconds.
+	StartTime int64 `json:"start_time,omitempty"`
+	// Current status of the run.
+	Status RunInfoStatus `json:"status,omitempty"`
+	// User who initiated the run. This field is deprecated as of MLflow 1.0,
+	// and will be removed in a future MLflow release. Use 'mlflow.user' tag
+	// instead.
+	UserId string `json:"user_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *RunInfo) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s RunInfo) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Current status of the run.
+type RunInfoStatus string
+
+const RunInfoStatusFailed RunInfoStatus = `FAILED`
+
+const RunInfoStatusFinished RunInfoStatus = `FINISHED`
+
+const RunInfoStatusKilled RunInfoStatus = `KILLED`
+
+const RunInfoStatusRunning RunInfoStatus = `RUNNING`
+
+const RunInfoStatusScheduled RunInfoStatus = `SCHEDULED`
+
+// String representation for [fmt.Print]
+func (f *RunInfoStatus) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RunInfoStatus) Set(v string) error {
+	switch v {
+	case `FAILED`, `FINISHED`, `KILLED`, `RUNNING`, `SCHEDULED`:
+		*f = RunInfoStatus(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "FAILED", "FINISHED", "KILLED", "RUNNING", "SCHEDULED"`, v)
+	}
+}
+
+// Type always returns RunInfoStatus to satisfy [pflag.Value] interface
+func (f *RunInfoStatus) Type() string {
+	return "RunInfoStatus"
+}
+
+type RunInputs struct {
+	// Dataset inputs of the run.
+	DatasetInputs []DatasetInput `json:"dataset_inputs,omitempty"`
+}
+
+type RunTag struct {
+	// The tag key.
+	Key string `json:"key,omitempty"`
+	// The tag value.
+ Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchExperiments struct { + // String representing a SQL filter condition (e.g. "name ILIKE + // 'my-experiment%'") + Filter string `json:"filter,omitempty"` + // Maximum number of experiments desired. Max threshold is 3000. + MaxResults int64 `json:"max_results,omitempty"` + // List of columns for ordering search results, which can include experiment + // name and last updated timestamp with an optional "DESC" or "ASC" + // annotation, where "ASC" is the default. Tiebreaks are done by experiment + // id DESC. + OrderBy []string `json:"order_by,omitempty"` + // Token indicating the page of experiments to fetch + PageToken string `json:"page_token,omitempty"` + // Qualifier for type of experiments to be returned. If unspecified, return + // only active experiments. + ViewType SearchExperimentsViewType `json:"view_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchExperiments) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchExperiments) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchExperimentsResponse struct { + // Experiments that match the search criteria + Experiments []Experiment `json:"experiments,omitempty"` + // Token that can be used to retrieve the next page of experiments. An empty + // token means that no more experiments are available for retrieval. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchExperimentsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchExperimentsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Qualifier for type of experiments to be returned. If unspecified, return only +// active experiments. +type SearchExperimentsViewType string + +const SearchExperimentsViewTypeActiveOnly SearchExperimentsViewType = `ACTIVE_ONLY` + +const SearchExperimentsViewTypeAll SearchExperimentsViewType = `ALL` + +const SearchExperimentsViewTypeDeletedOnly SearchExperimentsViewType = `DELETED_ONLY` + +// String representation for [fmt.Print] +func (f *SearchExperimentsViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SearchExperimentsViewType) Set(v string) error { + switch v { + case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: + *f = SearchExperimentsViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) + } +} + +// Type always returns SearchExperimentsViewType to satisfy [pflag.Value] interface +func (f *SearchExperimentsViewType) Type() string { + return "SearchExperimentsViewType" +} + +// Searches model versions +type SearchModelVersionsRequest struct { + // String filter condition, like "name='my-model-name'". Must be a single + // boolean condition, with string values wrapped in single quotes. + Filter string `json:"-" url:"filter,omitempty"` + // Maximum number of models desired. Max threshold is 10K. + MaxResults int `json:"-" url:"max_results,omitempty"` + // List of columns to be ordered by including model name, version, stage + // with an optional "DESC" or "ASC" annotation, where "ASC" is the default. 
+ // Tiebreaks are done by latest stage transition timestamp, followed by name + // ASC, followed by version DESC. + OrderBy []string `json:"-" url:"order_by,omitempty"` + // Pagination token to go to next page based on previous search query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchModelVersionsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchModelVersionsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchModelVersionsResponse struct { + // Models that match the search criteria + ModelVersions []ModelVersion `json:"model_versions,omitempty"` + // Pagination token to request next page of models for the same search + // query. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchModelVersionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchModelVersionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Search models +type SearchModelsRequest struct { + // String filter condition, like "name LIKE 'my-model-name'". Interpreted in + // the backend automatically as "name LIKE '%my-model-name%'". Single + // boolean condition, with string values wrapped in single quotes. + Filter string `json:"-" url:"filter,omitempty"` + // Maximum number of models desired. Default is 100. Max threshold is 1000. + MaxResults int `json:"-" url:"max_results,omitempty"` + // List of columns for ordering search results, which can include model name + // and last updated timestamp with an optional "DESC" or "ASC" annotation, + // where "ASC" is the default. Tiebreaks are done by model name ASC. + OrderBy []string `json:"-" url:"order_by,omitempty"` + // Pagination token to go to the next page based on a previous search query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchModelsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchModelsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchModelsResponse struct { + // Pagination token to request the next page of models. + NextPageToken string `json:"next_page_token,omitempty"` + // Registered Models that match the search criteria. + RegisteredModels []Model `json:"registered_models,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchModelsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchModelsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchRuns struct { + // List of experiment IDs to search over. + ExperimentIds []string `json:"experiment_ids,omitempty"` + // A filter expression over params, metrics, and tags, that allows returning + // a subset of runs. The syntax is a subset of SQL that supports ANDing + // together binary operations between a param, metric, or tag and a + // constant. + // + // Example: `metrics.rmse < 1 and params.model_class = 'LogisticRegression'` + // + // You can select columns with special characters (hyphen, space, period, + // etc.) by using double quotes: `metrics."model class" = 'LinearRegression' + // and tags."user-name" = 'Tomas'` + // + // Supported operators are `=`, `!=`, `>`, `>=`, `<`, and `<=`. + Filter string `json:"filter,omitempty"` + // Maximum number of runs desired. 
Max threshold is 50000 + MaxResults int `json:"max_results,omitempty"` + // List of columns to be ordered by, including attributes, params, metrics, + // and tags with an optional "DESC" or "ASC" annotation, where "ASC" is the + // default. Example: ["params.input DESC", "metrics.alpha ASC", + // "metrics.rmse"] Tiebreaks are done by start_time DESC followed by run_id + // for runs with the same start time (and this is the default ordering + // criterion if order_by is not provided). + OrderBy []string `json:"order_by,omitempty"` + // Token for the current page of runs. + PageToken string `json:"page_token,omitempty"` + // Whether to display only active, only deleted, or all runs. Defaults to + // only active runs. + RunViewType SearchRunsRunViewType `json:"run_view_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchRuns) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchRuns) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SearchRunsResponse struct { + // Token for the next page of runs. + NextPageToken string `json:"next_page_token,omitempty"` + // Runs that match the search criteria. + Runs []Run `json:"runs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SearchRunsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SearchRunsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Whether to display only active, only deleted, or all runs. Defaults to only +// active runs. +type SearchRunsRunViewType string + +const SearchRunsRunViewTypeActiveOnly SearchRunsRunViewType = `ACTIVE_ONLY` + +const SearchRunsRunViewTypeAll SearchRunsRunViewType = `ALL` + +const SearchRunsRunViewTypeDeletedOnly SearchRunsRunViewType = `DELETED_ONLY` + +// String representation for [fmt.Print] +func (f *SearchRunsRunViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SearchRunsRunViewType) Set(v string) error { + switch v { + case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: + *f = SearchRunsRunViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) + } +} + +// Type always returns SearchRunsRunViewType to satisfy [pflag.Value] interface +func (f *SearchRunsRunViewType) Type() string { + return "SearchRunsRunViewType" +} + +type SetExperimentTag struct { + // ID of the experiment under which to log the tag. Must be provided. + ExperimentId string `json:"experiment_id"` + // Name of the tag. Maximum size depends on storage backend. All storage + // backends are guaranteed to support key values up to 250 bytes in size. + Key string `json:"key"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value string `json:"value"` +} + +type SetExperimentTagResponse struct { +} + +type SetModelTagRequest struct { + // Name of the tag. Maximum size depends on storage backend. If a tag with + // this name already exists, its preexisting value will be replaced by the + // specified `value`. All storage backends are guaranteed to support key + // values up to 250 bytes in size. + Key string `json:"key"` + // Unique name of the model. + Name string `json:"name"` + // String value of the tag being logged. Maximum size depends on storage + // backend. 
All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value string `json:"value"` +} + +type SetModelTagResponse struct { +} + +type SetModelVersionTagRequest struct { + // Name of the tag. Maximum size depends on storage backend. If a tag with + // this name already exists, its preexisting value will be replaced by the + // specified `value`. All storage backends are guaranteed to support key + // values up to 250 bytes in size. + Key string `json:"key"` + // Unique name of the model. + Name string `json:"name"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value string `json:"value"` + // Model version number. + Version string `json:"version"` +} + +type SetModelVersionTagResponse struct { +} + +type SetTag struct { + // Name of the tag. Maximum size depends on storage backend. All storage + // backends are guaranteed to support key values up to 250 bytes in size. + Key string `json:"key"` + // ID of the run under which to log the tag. Must be provided. + RunId string `json:"run_id,omitempty"` + // [Deprecated, use run_id instead] ID of the run under which to log the + // tag. This field will be removed in a future MLflow version. + RunUuid string `json:"run_uuid,omitempty"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value string `json:"value"` + + ForceSendFields []string `json:"-"` +} + +func (s *SetTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SetTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SetTagResponse struct { +} + +// Stage of the model version. Valid values are: +// +// * `None`: The initial stage of a model version. +// +// * `Staging`: Staging or pre-production stage. +// +// * `Production`: Production stage. +// +// * `Archived`: Archived stage. +type Stage string + +// Archived stage. +const StageArchived Stage = `Archived` + +// The initial stage of a model version. +const StageNone Stage = `None` + +// Production stage. +const StageProduction Stage = `Production` + +// Staging or pre-production stage. +const StageStaging Stage = `Staging` + +// String representation for [fmt.Print] +func (f *Stage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Stage) Set(v string) error { + switch v { + case `Archived`, `None`, `Production`, `Staging`: + *f = Stage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Archived", "None", "Production", "Staging"`, v) + } +} + +// Type always returns Stage to satisfy [pflag.Value] interface +func (f *Stage) Type() string { + return "Stage" +} + +// The status of the model version. Valid values are: * `PENDING_REGISTRATION`: +// Request to register a new model version is pending as server performs +// background tasks. +// +// * `FAILED_REGISTRATION`: Request to register a new model version has failed. +// +// * `READY`: Model version is ready for use. +type Status string + +// Request to register a new model version has failed. +const StatusFailedRegistration Status = `FAILED_REGISTRATION` + +// Request to register a new model version is pending as server performs +// background tasks. 
+const StatusPendingRegistration Status = `PENDING_REGISTRATION` + +// Model version is ready for use. +const StatusReady Status = `READY` + +// String representation for [fmt.Print] +func (f *Status) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Status) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = Status(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns Status to satisfy [pflag.Value] interface +func (f *Status) Type() string { + return "Status" +} + +// Test webhook response object. +type TestRegistryWebhook struct { + // Body of the response from the webhook URL + Body string `json:"body,omitempty"` + // Status code returned by the webhook URL + StatusCode int `json:"status_code,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TestRegistryWebhook) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TestRegistryWebhook) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TestRegistryWebhookRequest struct { + // If `event` is specified, the test trigger uses the specified event. If + // `event` is not specified, the test trigger uses a randomly chosen event + // associated with the webhook. + Event RegistryWebhookEvent `json:"event,omitempty"` + // Webhook ID + Id string `json:"id"` +} + +type TestRegistryWebhookResponse struct { + // Test webhook response object. + Webhook *TestRegistryWebhook `json:"webhook,omitempty"` +} + +type TransitionModelVersionStageDatabricks struct { + // Specifies whether to archive all current model versions in the target + // stage. + ArchiveExistingVersions bool `json:"archive_existing_versions"` + // User-provided comment on the action. + Comment string `json:"comment,omitempty"` + // Name of the model. + Name string `json:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `json:"stage"` + // Version of the model. + Version string `json:"version"` + + ForceSendFields []string `json:"-"` +} + +func (s *TransitionModelVersionStageDatabricks) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TransitionModelVersionStageDatabricks) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Transition request details. +type TransitionRequest struct { + // Array of actions on the activity allowed for the current viewer. + AvailableActions []ActivityAction `json:"available_actions,omitempty"` + // User-provided comment associated with the transition request. + Comment string `json:"comment,omitempty"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Target stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + ToStage Stage `json:"to_stage,omitempty"` + // The username of the user that created the object. 
+	UserId string `json:"user_id,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *TransitionRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s TransitionRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type TransitionStageResponse struct {
+	ModelVersion *ModelVersionDatabricks `json:"model_version,omitempty"`
+}
+
+type UpdateComment struct {
+	// User-provided comment on the action.
+	Comment string `json:"comment"`
+	// Unique identifier of an activity
+	Id string `json:"id"`
+}
+
+type UpdateCommentResponse struct {
+	// Comment details.
+	Comment *CommentObject `json:"comment,omitempty"`
+}
+
+type UpdateExperiment struct {
+	// ID of the associated experiment.
+	ExperimentId string `json:"experiment_id"`
+	// If provided, the experiment's name is changed to the new name. The new
+	// name must be unique.
+	NewName string `json:"new_name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateExperiment) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateExperiment) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateExperimentResponse struct {
+}
+
+type UpdateModelRequest struct {
+	// If provided, updates the description for this `registered_model`.
+	Description string `json:"description,omitempty"`
+	// Registered model unique name identifier.
+	Name string `json:"name"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateModelRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateModelRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateModelResponse struct {
+}
+
+type UpdateModelVersionRequest struct {
+	// If provided, updates the description for this model version.
+	Description string `json:"description,omitempty"`
+	// Name of the registered model
+	Name string `json:"name"`
+	// Model version number
+	Version string `json:"version"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateModelVersionRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateModelVersionRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateModelVersionResponse struct {
+}
+
+type UpdateRegistryWebhook struct {
+	// User-specified description for the webhook.
+	Description string `json:"description,omitempty"`
+	// Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A
+	// new model version was created for the associated model.
+	//
+	// * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was
+	// changed.
+	//
+	// * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s
+	// stage be transitioned.
+	//
+	// * `COMMENT_CREATED`: A user wrote a comment on a registered model.
+	//
+	// * `REGISTERED_MODEL_CREATED`: A new registered model was created. This
+	// event type can only be specified for a registry-wide webhook, which can
+	// be created by not specifying a model name in the create request.
+	//
+	// * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version.
+	//
+	// * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was
+	// transitioned to staging.
+	//
+	// * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was
+	// transitioned to production.
+	//
+	// * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived.
+	//
+	// * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model
+	// version be transitioned to staging.
+	//
+	// * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model
+	// version be transitioned to production.
+	//
+	// * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model
+	// version be archived.
+	Events []RegistryWebhookEvent `json:"events,omitempty"`
+
+	HttpUrlSpec *HttpUrlSpec `json:"http_url_spec,omitempty"`
+	// Webhook ID
+	Id string `json:"id"`
+
+	JobSpec *JobSpec `json:"job_spec,omitempty"`
+	// Enable or disable triggering the webhook, or put the webhook into test
+	// mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an
+	// associated event happens.
+	//
+	// * `DISABLED`: Webhook is not triggered.
+	//
+	// * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is
+	// not triggered on a real event.
+	Status RegistryWebhookStatus `json:"status,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateRegistryWebhook) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateRegistryWebhook) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateRun struct {
+	// Unix timestamp in milliseconds of when the run ended.
+	EndTime int64 `json:"end_time,omitempty"`
+	// ID of the run to update. Must be provided.
+	RunId string `json:"run_id,omitempty"`
+	// [Deprecated, use run_id instead] ID of the run to update. This field
+	// will be removed in a future MLflow version.
+	RunUuid string `json:"run_uuid,omitempty"`
+	// Updated status of the run.
+	Status UpdateRunStatus `json:"status,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateRun) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateRun) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type UpdateRunResponse struct {
+	// Updated metadata of the run.
+	RunInfo *RunInfo `json:"run_info,omitempty"`
+}
+
+// Updated status of the run.
+type UpdateRunStatus string
+
+const UpdateRunStatusFailed UpdateRunStatus = `FAILED`
+
+const UpdateRunStatusFinished UpdateRunStatus = `FINISHED`
+
+const UpdateRunStatusKilled UpdateRunStatus = `KILLED`
+
+const UpdateRunStatusRunning UpdateRunStatus = `RUNNING`
+
+const UpdateRunStatusScheduled UpdateRunStatus = `SCHEDULED`
+
+// String representation for [fmt.Print]
+func (f *UpdateRunStatus) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *UpdateRunStatus) Set(v string) error {
+	switch v {
+	case `FAILED`, `FINISHED`, `KILLED`, `RUNNING`, `SCHEDULED`:
+		*f = UpdateRunStatus(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "FAILED", "FINISHED", "KILLED", "RUNNING", "SCHEDULED"`, v)
+	}
+}
+
+// Type always returns UpdateRunStatus to satisfy [pflag.Value] interface
+func (f *UpdateRunStatus) Type() string {
+	return "UpdateRunStatus"
+}
+
+type UpdateWebhookResponse struct {
+}
diff --git a/oauth2/v2preview/api.go b/oauth2/v2preview/api.go
new file mode 100755
index 000000000..1d9e44972
--- /dev/null
+++ b/oauth2/v2preview/api.go
@@ -0,0 +1,541 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+// These APIs allow you to manage Account Federation Policy Preview, Custom App Integration Preview, OAuth Published Apps Preview, Published App Integration Preview, Service Principal Federation Policy Preview, Service Principal Secrets Preview, etc.
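+//
+// The sketch below is a hand-written, minimal example of creating the account
+// federation policy documented on AccountFederationPolicyPreviewAPI further
+// down in this file; it is not generated code. It assumes the account host and
+// account ID resolve from the environment (DATABRICKS_HOST,
+// DATABRICKS_ACCOUNT_ID), and the request and policy field names used here
+// (Policy, OidcPolicy, Issuer, Audiences, SubjectClaim) are assumptions about
+// this package's model.go; verify them there before relying on this.
+//
+//	import (
+//		"context"
+//
+//		"github.com/databricks/databricks-sdk-go/databricks/config"
+//		oauth2preview "github.com/databricks/databricks-sdk-go/oauth2/v2preview"
+//	)
+//
+//	func createFederationPolicy() (*oauth2preview.FederationPolicy, error) {
+//		client, err := oauth2preview.NewAccountFederationPolicyPreviewClient(&config.Config{})
+//		if err != nil {
+//			return nil, err
+//		}
+//		// Accept tokens issued by https://idp.mycompany.com/oidc whose "sub"
+//		// claim carries the Databricks username, per the policy example below.
+//		return client.Create(context.Background(), oauth2preview.CreateAccountFederationPolicyRequest{
+//			Policy: oauth2preview.FederationPolicy{
+//				OidcPolicy: &oauth2preview.OidcFederationPolicy{
+//					Issuer:       "https://idp.mycompany.com/oidc",
+//					Audiences:    []string{"databricks"},
+//					SubjectClaim: "sub",
+//				},
+//			},
+//		})
+//	}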
+package oauth2preview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type AccountFederationPolicyPreviewInterface interface { + + // Create account federation policy. + Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) + + // Delete account federation policy. + Delete(ctx context.Context, request DeleteAccountFederationPolicyRequest) error + + // Delete account federation policy. + DeleteByPolicyId(ctx context.Context, policyId string) error + + // Get account federation policy. + Get(ctx context.Context, request GetAccountFederationPolicyRequest) (*FederationPolicy, error) + + // Get account federation policy. + GetByPolicyId(ctx context.Context, policyId string) (*FederationPolicy, error) + + // List account federation policies. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] + + // List account federation policies. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) + + // Update account federation policy. + Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) +} + +func NewAccountFederationPolicyPreview(client *client.DatabricksClient) *AccountFederationPolicyPreviewAPI { + return &AccountFederationPolicyPreviewAPI{ + accountFederationPolicyPreviewImpl: accountFederationPolicyPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage account federation policies. +// +// Account federation policies allow users and service principals in your +// Databricks account to securely access Databricks APIs using tokens from your +// trusted identity providers (IdPs). +// +// With token federation, your users and service principals can exchange tokens +// from your IdP for Databricks OAuth tokens, which can be used to access +// Databricks APIs. Token federation eliminates the need to manage Databricks +// secrets, and allows you to centralize management of token issuance policies +// in your IdP. Databricks token federation is typically used in combination +// with [SCIM], so users in your IdP are synchronized into your Databricks +// account. +// +// Token federation is configured in your Databricks account using an account +// federation policy. An account federation policy specifies: * which IdP, or +// issuer, your Databricks account should accept tokens from * how to determine +// which Databricks user, or subject, a token is issued for +// +// To configure a federation policy, you provide the following: * The required +// token __issuer__, as specified in the “iss” claim of your tokens. The +// issuer is an https URL that identifies your IdP. * The allowed token +// __audiences__, as specified in the “aud” claim of your tokens. This +// identifier is intended to represent the recipient of the token. As long as +// the audience in the token matches at least one audience in the policy, the +// token is considered a match. If unspecified, the default value is your +// Databricks account id. * The __subject claim__, which indicates which token +// claim contains the Databricks username of the user the token was issued for. +// If unspecified, the default value is “sub”. 
* Optionally, the public keys +// used to validate the signature of your tokens, in JWKS format. If unspecified +// (recommended), Databricks automatically fetches the public keys from your +// issuer’s well known endpoint. Databricks strongly recommends relying on +// your issuer’s well known endpoint for discovering public keys. +// +// An example federation policy is: ``` issuer: "https://idp.mycompany.com/oidc" +// audiences: ["databricks"] subject_claim: "sub" ``` +// +// An example JWT token body that matches this policy and could be used to +// authenticate to Databricks as user `username@mycompany.com` is: ``` { "iss": +// "https://idp.mycompany.com/oidc", "aud": "databricks", "sub": +// "username@mycompany.com" } ``` +// +// You may also need to configure your IdP to generate tokens for your users to +// exchange with Databricks, if your users do not already have the ability to +// generate tokens that are compatible with your federation policy. +// +// You do not need to configure an OAuth application in Databricks to use token +// federation. +// +// [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html +type AccountFederationPolicyPreviewAPI struct { + accountFederationPolicyPreviewImpl +} + +// Delete account federation policy. +func (a *AccountFederationPolicyPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.accountFederationPolicyPreviewImpl.Delete(ctx, DeleteAccountFederationPolicyRequest{ + PolicyId: policyId, + }) +} + +// Get account federation policy. +func (a *AccountFederationPolicyPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*FederationPolicy, error) { + return a.accountFederationPolicyPreviewImpl.Get(ctx, GetAccountFederationPolicyRequest{ + PolicyId: policyId, + }) +} + +type CustomAppIntegrationPreviewInterface interface { + + // Create Custom OAuth App Integration. + // + // Create Custom OAuth App Integration. + // + // You can retrieve the custom OAuth app integration via + // :method:CustomAppIntegration/get. + Create(ctx context.Context, request CreateCustomAppIntegration) (*CreateCustomAppIntegrationOutput, error) + + // Delete Custom OAuth App Integration. + // + // Delete an existing Custom OAuth App Integration. You can retrieve the custom + // OAuth app integration via :method:CustomAppIntegration/get. + Delete(ctx context.Context, request DeleteCustomAppIntegrationRequest) error + + // Delete Custom OAuth App Integration. + // + // Delete an existing Custom OAuth App Integration. You can retrieve the custom + // OAuth app integration via :method:CustomAppIntegration/get. + DeleteByIntegrationId(ctx context.Context, integrationId string) error + + // Get OAuth Custom App Integration. + // + // Gets the Custom OAuth App Integration for the given integration id. + Get(ctx context.Context, request GetCustomAppIntegrationRequest) (*GetCustomAppIntegrationOutput, error) + + // Get OAuth Custom App Integration. + // + // Gets the Custom OAuth App Integration for the given integration id. + GetByIntegrationId(ctx context.Context, integrationId string) (*GetCustomAppIntegrationOutput, error) + + // Get custom oauth app integrations. + // + // Get the list of custom OAuth app integrations for the specified Databricks + // account + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] + + // Get custom oauth app integrations. 
+ // + // Get the list of custom OAuth app integrations for the specified Databricks + // account + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) + + // Updates Custom OAuth App Integration. + // + // Updates an existing custom OAuth App Integration. You can retrieve the custom + // OAuth app integration via :method:CustomAppIntegration/get. + Update(ctx context.Context, request UpdateCustomAppIntegration) error +} + +func NewCustomAppIntegrationPreview(client *client.DatabricksClient) *CustomAppIntegrationPreviewAPI { + return &CustomAppIntegrationPreviewAPI{ + customAppIntegrationPreviewImpl: customAppIntegrationPreviewImpl{ + client: client, + }, + } +} + +// These APIs enable administrators to manage custom OAuth app integrations, +// which is required for adding/using Custom OAuth App Integration like Tableau +// Cloud for Databricks in AWS cloud. +type CustomAppIntegrationPreviewAPI struct { + customAppIntegrationPreviewImpl +} + +// Delete Custom OAuth App Integration. +// +// Delete an existing Custom OAuth App Integration. You can retrieve the custom +// OAuth app integration via :method:CustomAppIntegration/get. +func (a *CustomAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { + return a.customAppIntegrationPreviewImpl.Delete(ctx, DeleteCustomAppIntegrationRequest{ + IntegrationId: integrationId, + }) +} + +// Get OAuth Custom App Integration. +// +// Gets the Custom OAuth App Integration for the given integration id. +func (a *CustomAppIntegrationPreviewAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetCustomAppIntegrationOutput, error) { + return a.customAppIntegrationPreviewImpl.Get(ctx, GetCustomAppIntegrationRequest{ + IntegrationId: integrationId, + }) +} + +type OAuthPublishedAppsPreviewInterface interface { + + // Get all the published OAuth apps. + // + // Get all the available published OAuth apps in Databricks. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] + + // Get all the published OAuth apps. + // + // Get all the available published OAuth apps in Databricks. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) +} + +func NewOAuthPublishedAppsPreview(client *client.DatabricksClient) *OAuthPublishedAppsPreviewAPI { + return &OAuthPublishedAppsPreviewAPI{ + oAuthPublishedAppsPreviewImpl: oAuthPublishedAppsPreviewImpl{ + client: client, + }, + } +} + +// These APIs enable administrators to view all the available published OAuth +// applications in Databricks. Administrators can add the published OAuth +// applications to their account through the OAuth Published App Integration +// APIs. +type OAuthPublishedAppsPreviewAPI struct { + oAuthPublishedAppsPreviewImpl +} + +type PublishedAppIntegrationPreviewInterface interface { + + // Create Published OAuth App Integration. + // + // Create Published OAuth App Integration. + // + // You can retrieve the published OAuth app integration via + // :method:PublishedAppIntegration/get. + Create(ctx context.Context, request CreatePublishedAppIntegration) (*CreatePublishedAppIntegrationOutput, error) + + // Delete Published OAuth App Integration. 
+ // + // Delete an existing Published OAuth App Integration. You can retrieve the + // published OAuth app integration via :method:PublishedAppIntegration/get. + Delete(ctx context.Context, request DeletePublishedAppIntegrationRequest) error + + // Delete Published OAuth App Integration. + // + // Delete an existing Published OAuth App Integration. You can retrieve the + // published OAuth app integration via :method:PublishedAppIntegration/get. + DeleteByIntegrationId(ctx context.Context, integrationId string) error + + // Get OAuth Published App Integration. + // + // Gets the Published OAuth App Integration for the given integration id. + Get(ctx context.Context, request GetPublishedAppIntegrationRequest) (*GetPublishedAppIntegrationOutput, error) + + // Get OAuth Published App Integration. + // + // Gets the Published OAuth App Integration for the given integration id. + GetByIntegrationId(ctx context.Context, integrationId string) (*GetPublishedAppIntegrationOutput, error) + + // Get published oauth app integrations. + // + // Get the list of published OAuth app integrations for the specified Databricks + // account + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] + + // Get published oauth app integrations. + // + // Get the list of published OAuth app integrations for the specified Databricks + // account + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) + + // Updates Published OAuth App Integration. + // + // Updates an existing published OAuth App Integration. You can retrieve the + // published OAuth app integration via :method:PublishedAppIntegration/get. + Update(ctx context.Context, request UpdatePublishedAppIntegration) error +} + +func NewPublishedAppIntegrationPreview(client *client.DatabricksClient) *PublishedAppIntegrationPreviewAPI { + return &PublishedAppIntegrationPreviewAPI{ + publishedAppIntegrationPreviewImpl: publishedAppIntegrationPreviewImpl{ + client: client, + }, + } +} + +// These APIs enable administrators to manage published OAuth app integrations, +// which is required for adding/using Published OAuth App Integration like +// Tableau Desktop for Databricks in AWS cloud. +type PublishedAppIntegrationPreviewAPI struct { + publishedAppIntegrationPreviewImpl +} + +// Delete Published OAuth App Integration. +// +// Delete an existing Published OAuth App Integration. You can retrieve the +// published OAuth app integration via :method:PublishedAppIntegration/get. +func (a *PublishedAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { + return a.publishedAppIntegrationPreviewImpl.Delete(ctx, DeletePublishedAppIntegrationRequest{ + IntegrationId: integrationId, + }) +} + +// Get OAuth Published App Integration. +// +// Gets the Published OAuth App Integration for the given integration id. +func (a *PublishedAppIntegrationPreviewAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetPublishedAppIntegrationOutput, error) { + return a.publishedAppIntegrationPreviewImpl.Get(ctx, GetPublishedAppIntegrationRequest{ + IntegrationId: integrationId, + }) +} + +type ServicePrincipalFederationPolicyPreviewInterface interface { + + // Create service principal federation policy. 
+ Create(ctx context.Context, request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) + + // Delete service principal federation policy. + Delete(ctx context.Context, request DeleteServicePrincipalFederationPolicyRequest) error + + // Delete service principal federation policy. + DeleteByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) error + + // Get service principal federation policy. + Get(ctx context.Context, request GetServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) + + // Get service principal federation policy. + GetByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) (*FederationPolicy, error) + + // List service principal federation policies. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] + + // List service principal federation policies. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) + + // List service principal federation policies. + ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListFederationPoliciesResponse, error) + + // Update service principal federation policy. + Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) +} + +func NewServicePrincipalFederationPolicyPreview(client *client.DatabricksClient) *ServicePrincipalFederationPolicyPreviewAPI { + return &ServicePrincipalFederationPolicyPreviewAPI{ + servicePrincipalFederationPolicyPreviewImpl: servicePrincipalFederationPolicyPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage service principal federation policies. +// +// Service principal federation, also known as Workload Identity Federation, +// allows your automated workloads running outside of Databricks to securely +// access Databricks APIs without the need for Databricks secrets. With Workload +// Identity Federation, your application (or workload) authenticates to +// Databricks as a Databricks service principal, using tokens provided by the +// workload runtime. +// +// Databricks strongly recommends using Workload Identity Federation to +// authenticate to Databricks from automated workloads, over alternatives such +// as OAuth client secrets or Personal Access Tokens, whenever possible. +// Workload Identity Federation is supported by many popular services, including +// Github Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes +// clusters, among others. +// +// Workload identity federation is configured in your Databricks account using a +// service principal federation policy. A service principal federation policy +// specifies: * which IdP, or issuer, the service principal is allowed to +// authenticate from * which workload identity, or subject, is allowed to +// authenticate as the Databricks service principal +// +// To configure a federation policy, you provide the following: * The required +// token __issuer__, as specified in the “iss” claim of workload identity +// tokens. The issuer is an https URL that identifies the workload identity +// provider. * The required token __subject__, as specified in the “sub” +// claim of workload identity tokens. The subject uniquely identifies the +// workload in the workload runtime environment. 
* The allowed token +// __audiences__, as specified in the “aud” claim of workload identity +// tokens. The audience is intended to represent the recipient of the token. As +// long as the audience in the token matches at least one audience in the +// policy, the token is considered a match. If unspecified, the default value is +// your Databricks account id. * Optionally, the public keys used to validate +// the signature of the workload identity tokens, in JWKS format. If unspecified +// (recommended), Databricks automatically fetches the public keys from the +// issuer’s well known endpoint. Databricks strongly recommends relying on the +// issuer’s well known endpoint for discovering public keys. +// +// An example service principal federation policy, for a Github Actions +// workload, is: ``` issuer: "https://token.actions.githubusercontent.com" +// audiences: ["https://github.com/my-github-org"] subject: +// "repo:my-github-org/my-repo:environment:prod" ``` +// +// An example JWT token body that matches this policy and could be used to +// authenticate to Databricks is: ``` { "iss": +// "https://token.actions.githubusercontent.com", "aud": +// "https://github.com/my-github-org", "sub": +// "repo:my-github-org/my-repo:environment:prod" } ``` +// +// You may also need to configure the workload runtime to generate tokens for +// your workloads. +// +// You do not need to configure an OAuth application in Databricks to use token +// federation. +type ServicePrincipalFederationPolicyPreviewAPI struct { + servicePrincipalFederationPolicyPreviewImpl +} + +// Delete service principal federation policy. +func (a *ServicePrincipalFederationPolicyPreviewAPI) DeleteByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) error { + return a.servicePrincipalFederationPolicyPreviewImpl.Delete(ctx, DeleteServicePrincipalFederationPolicyRequest{ + ServicePrincipalId: servicePrincipalId, + PolicyId: policyId, + }) +} + +// Get service principal federation policy. +func (a *ServicePrincipalFederationPolicyPreviewAPI) GetByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) (*FederationPolicy, error) { + return a.servicePrincipalFederationPolicyPreviewImpl.Get(ctx, GetServicePrincipalFederationPolicyRequest{ + ServicePrincipalId: servicePrincipalId, + PolicyId: policyId, + }) +} + +// List service principal federation policies. +func (a *ServicePrincipalFederationPolicyPreviewAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListFederationPoliciesResponse, error) { + return a.servicePrincipalFederationPolicyPreviewImpl.internalList(ctx, ListServicePrincipalFederationPoliciesRequest{ + ServicePrincipalId: servicePrincipalId, + }) +} + +type ServicePrincipalSecretsPreviewInterface interface { + + // Create service principal secret. + // + // Create a secret for the given service principal. + Create(ctx context.Context, request CreateServicePrincipalSecretRequest) (*CreateServicePrincipalSecretResponse, error) + + // Delete service principal secret. + // + // Delete a secret from the given service principal. + Delete(ctx context.Context, request DeleteServicePrincipalSecretRequest) error + + // Delete service principal secret. + // + // Delete a secret from the given service principal. + DeleteByServicePrincipalIdAndSecretId(ctx context.Context, servicePrincipalId int64, secretId string) error + + // List service principal secrets. 
+ // + // List all secrets associated with the given service principal. This operation + // only returns information about the secrets themselves and does not include + // the secret values. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] + + // List service principal secrets. + // + // List all secrets associated with the given service principal. This operation + // only returns information about the secrets themselves and does not include + // the secret values. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) + + // List service principal secrets. + // + // List all secrets associated with the given service principal. This operation + // only returns information about the secrets themselves and does not include + // the secret values. + ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) +} + +func NewServicePrincipalSecretsPreview(client *client.DatabricksClient) *ServicePrincipalSecretsPreviewAPI { + return &ServicePrincipalSecretsPreviewAPI{ + servicePrincipalSecretsPreviewImpl: servicePrincipalSecretsPreviewImpl{ + client: client, + }, + } +} + +// These APIs enable administrators to manage service principal secrets. +// +// You can use the generated secrets to obtain OAuth access tokens for a service +// principal, which can then be used to access Databricks Accounts and Workspace +// APIs. For more information, see [Authentication using OAuth tokens for +// service principals]. +// +// In addition, the generated secrets can be used to configure the Databricks +// Terraform Provider to authenticate with the service principal. For more +// information, see [Databricks Terraform Provider]. +// +// [Authentication using OAuth tokens for service principals]: https://docs.databricks.com/dev-tools/authentication-oauth.html +// [Databricks Terraform Provider]: https://github.com/databricks/terraform-provider-databricks/blob/master/docs/index.md#authenticating-with-service-principal +type ServicePrincipalSecretsPreviewAPI struct { + servicePrincipalSecretsPreviewImpl +} + +// Delete service principal secret. +// +// Delete a secret from the given service principal. +func (a *ServicePrincipalSecretsPreviewAPI) DeleteByServicePrincipalIdAndSecretId(ctx context.Context, servicePrincipalId int64, secretId string) error { + return a.servicePrincipalSecretsPreviewImpl.Delete(ctx, DeleteServicePrincipalSecretRequest{ + ServicePrincipalId: servicePrincipalId, + SecretId: secretId, + }) +} + +// List service principal secrets. +// +// List all secrets associated with the given service principal. This operation +// only returns information about the secrets themselves and does not include +// the secret values. +func (a *ServicePrincipalSecretsPreviewAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) { + return a.servicePrincipalSecretsPreviewImpl.internalList(ctx, ListServicePrincipalSecretsRequest{ + ServicePrincipalId: servicePrincipalId, + }) +} diff --git a/oauth2/v2preview/client.go b/oauth2/v2preview/client.go new file mode 100755 index 000000000..fa5d1190c --- /dev/null +++ b/oauth2/v2preview/client.go @@ -0,0 +1,190 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
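+//
+// The sketch below is illustrative only and is not part of the generated API
+// surface: it shows how one of the account-level preview clients defined in
+// this file might be constructed. The host and account ID are placeholder
+// values, and error handling is abbreviated.
+//
+//	cfg := &config.Config{
+//		Host:      "https://accounts.cloud.databricks.com",
+//		AccountID: "11111111-2222-3333-4444-555555555555",
+//	}
+//	secrets, err := NewServicePrincipalSecretsPreviewClient(cfg)
+//	if err != nil {
+//		panic(err) // the config must resolve to a valid account-level client
+//	}
+//	_ = secrets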
+ +package oauth2preview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" +) + +type AccountFederationPolicyPreviewClient struct { + AccountFederationPolicyPreviewInterface + + Config *config.Config +} + +func NewAccountFederationPolicyPreviewClient(cfg *config.Config) (*AccountFederationPolicyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountFederationPolicyPreviewClient{ + Config: cfg, + AccountFederationPolicyPreviewInterface: NewAccountFederationPolicyPreview(apiClient), + }, nil +} + +type CustomAppIntegrationPreviewClient struct { + CustomAppIntegrationPreviewInterface + + Config *config.Config +} + +func NewCustomAppIntegrationPreviewClient(cfg *config.Config) (*CustomAppIntegrationPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &CustomAppIntegrationPreviewClient{ + Config: cfg, + CustomAppIntegrationPreviewInterface: NewCustomAppIntegrationPreview(apiClient), + }, nil +} + +type OAuthPublishedAppsPreviewClient struct { + OAuthPublishedAppsPreviewInterface + + Config *config.Config +} + +func NewOAuthPublishedAppsPreviewClient(cfg *config.Config) (*OAuthPublishedAppsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &OAuthPublishedAppsPreviewClient{ + Config: cfg, + OAuthPublishedAppsPreviewInterface: NewOAuthPublishedAppsPreview(apiClient), + }, nil +} + +type PublishedAppIntegrationPreviewClient struct { + PublishedAppIntegrationPreviewInterface + + Config *config.Config +} + +func NewPublishedAppIntegrationPreviewClient(cfg *config.Config) (*PublishedAppIntegrationPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &PublishedAppIntegrationPreviewClient{ + Config: cfg, + PublishedAppIntegrationPreviewInterface: NewPublishedAppIntegrationPreview(apiClient), + }, nil +} + +type ServicePrincipalFederationPolicyPreviewClient struct { + ServicePrincipalFederationPolicyPreviewInterface + + Config *config.Config +} + +func NewServicePrincipalFederationPolicyPreviewClient(cfg *config.Config) 
(*ServicePrincipalFederationPolicyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &ServicePrincipalFederationPolicyPreviewClient{ + Config: cfg, + ServicePrincipalFederationPolicyPreviewInterface: NewServicePrincipalFederationPolicyPreview(apiClient), + }, nil +} + +type ServicePrincipalSecretsPreviewClient struct { + ServicePrincipalSecretsPreviewInterface + + Config *config.Config +} + +func NewServicePrincipalSecretsPreviewClient(cfg *config.Config) (*ServicePrincipalSecretsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &ServicePrincipalSecretsPreviewClient{ + Config: cfg, + ServicePrincipalSecretsPreviewInterface: NewServicePrincipalSecretsPreview(apiClient), + }, nil +} diff --git a/oauth2/v2preview/impl.go b/oauth2/v2preview/impl.go new file mode 100755 index 000000000..bc755cdc7 --- /dev/null +++ b/oauth2/v2preview/impl.go @@ -0,0 +1,507 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package oauth2preview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" + "golang.org/x/exp/slices" +) + +// unexported type that holds implementations of just AccountFederationPolicyPreview API methods +type accountFederationPolicyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountFederationPolicyPreviewImpl) Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + if request.PolicyId != "" || slices.Contains(request.ForceSendFields, "PolicyId") { + queryParams["policy_id"] = request.PolicyId + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Policy, &federationPolicy) + return &federationPolicy, err +} + +func (a *accountFederationPolicyPreviewImpl) Delete(ctx context.Context, request DeleteAccountFederationPolicyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountFederationPolicyPreviewImpl) Get(ctx context.Context, request GetAccountFederationPolicyRequest) 
(*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &federationPolicy) + return &federationPolicy, err +} + +// List account federation policies. +func (a *accountFederationPolicyPreviewImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] { + + getNextPage := func(ctx context.Context, req ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { + return resp.Policies + } + getNextReq := func(resp *ListFederationPoliciesResponse) *ListAccountFederationPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List account federation policies. +func (a *accountFederationPolicyPreviewImpl) ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FederationPolicy](ctx, iterator) +} +func (a *accountFederationPolicyPreviewImpl) internalList(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + var listFederationPoliciesResponse ListFederationPoliciesResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFederationPoliciesResponse) + return &listFederationPoliciesResponse, err +} + +func (a *accountFederationPolicyPreviewImpl) Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + if request.UpdateMask != "" || slices.Contains(request.ForceSendFields, "UpdateMask") { + queryParams["update_mask"] = request.UpdateMask + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &federationPolicy) + return &federationPolicy, err +} + +// unexported type that holds implementations of just CustomAppIntegrationPreview API methods +type customAppIntegrationPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *customAppIntegrationPreviewImpl) Create(ctx context.Context, request CreateCustomAppIntegration) (*CreateCustomAppIntegrationOutput, error) { + var createCustomAppIntegrationOutput CreateCustomAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := 
make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCustomAppIntegrationOutput) + return &createCustomAppIntegrationOutput, err +} + +func (a *customAppIntegrationPreviewImpl) Delete(ctx context.Context, request DeleteCustomAppIntegrationRequest) error { + var deleteCustomAppIntegrationOutput DeleteCustomAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCustomAppIntegrationOutput) + return err +} + +func (a *customAppIntegrationPreviewImpl) Get(ctx context.Context, request GetCustomAppIntegrationRequest) (*GetCustomAppIntegrationOutput, error) { + var getCustomAppIntegrationOutput GetCustomAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCustomAppIntegrationOutput) + return &getCustomAppIntegrationOutput, err +} + +// Get custom oauth app integrations. +// +// Get the list of custom OAuth app integrations for the specified Databricks +// account +func (a *customAppIntegrationPreviewImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] { + + getNextPage := func(ctx context.Context, req ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetCustomAppIntegrationsOutput) []GetCustomAppIntegrationOutput { + return resp.Apps + } + getNextReq := func(resp *GetCustomAppIntegrationsOutput) *ListCustomAppIntegrationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get custom oauth app integrations. 
+// +// Get the list of custom OAuth app integrations for the specified Databricks +// account +func (a *customAppIntegrationPreviewImpl) ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[GetCustomAppIntegrationOutput](ctx, iterator) +} +func (a *customAppIntegrationPreviewImpl) internalList(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { + var getCustomAppIntegrationsOutput GetCustomAppIntegrationsOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCustomAppIntegrationsOutput) + return &getCustomAppIntegrationsOutput, err +} + +func (a *customAppIntegrationPreviewImpl) Update(ctx context.Context, request UpdateCustomAppIntegration) error { + var updateCustomAppIntegrationOutput UpdateCustomAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCustomAppIntegrationOutput) + return err +} + +// unexported type that holds implementations of just OAuthPublishedAppsPreview API methods +type oAuthPublishedAppsPreviewImpl struct { + client *client.DatabricksClient +} + +// Get all the published OAuth apps. +// +// Get all the available published OAuth apps in Databricks. +func (a *oAuthPublishedAppsPreviewImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] { + + getNextPage := func(ctx context.Context, req ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetPublishedAppsOutput) []PublishedAppOutput { + return resp.Apps + } + getNextReq := func(resp *GetPublishedAppsOutput) *ListOAuthPublishedAppsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all the published OAuth apps. +// +// Get all the available published OAuth apps in Databricks. 
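+//
+// As an illustrative sketch only (assuming an authenticated
+// *client.DatabricksClient named apiClient and a context ctx; the page size
+// is an arbitrary placeholder):
+//
+//	published := NewOAuthPublishedAppsPreview(apiClient)
+//	apps, err := published.ListAll(ctx, ListOAuthPublishedAppsRequest{PageSize: 100})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, app := range apps {
+//		fmt.Printf("%s (client_id=%s)\n", app.Name, app.ClientId)
+//	}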
+func (a *oAuthPublishedAppsPreviewImpl) ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PublishedAppOutput](ctx, iterator) +} +func (a *oAuthPublishedAppsPreviewImpl) internalList(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { + var getPublishedAppsOutput GetPublishedAppsOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-apps", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppsOutput) + return &getPublishedAppsOutput, err +} + +// unexported type that holds implementations of just PublishedAppIntegrationPreview API methods +type publishedAppIntegrationPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *publishedAppIntegrationPreviewImpl) Create(ctx context.Context, request CreatePublishedAppIntegration) (*CreatePublishedAppIntegrationOutput, error) { + var createPublishedAppIntegrationOutput CreatePublishedAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPublishedAppIntegrationOutput) + return &createPublishedAppIntegrationOutput, err +} + +func (a *publishedAppIntegrationPreviewImpl) Delete(ctx context.Context, request DeletePublishedAppIntegrationRequest) error { + var deletePublishedAppIntegrationOutput DeletePublishedAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deletePublishedAppIntegrationOutput) + return err +} + +func (a *publishedAppIntegrationPreviewImpl) Get(ctx context.Context, request GetPublishedAppIntegrationRequest) (*GetPublishedAppIntegrationOutput, error) { + var getPublishedAppIntegrationOutput GetPublishedAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppIntegrationOutput) + return &getPublishedAppIntegrationOutput, err +} + +// Get published oauth app integrations. 
+// +// Get the list of published OAuth app integrations for the specified Databricks +// account +func (a *publishedAppIntegrationPreviewImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] { + + getNextPage := func(ctx context.Context, req ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetPublishedAppIntegrationsOutput) []GetPublishedAppIntegrationOutput { + return resp.Apps + } + getNextReq := func(resp *GetPublishedAppIntegrationsOutput) *ListPublishedAppIntegrationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get published oauth app integrations. +// +// Get the list of published OAuth app integrations for the specified Databricks +// account +func (a *publishedAppIntegrationPreviewImpl) ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[GetPublishedAppIntegrationOutput](ctx, iterator) +} +func (a *publishedAppIntegrationPreviewImpl) internalList(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { + var getPublishedAppIntegrationsOutput GetPublishedAppIntegrationsOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppIntegrationsOutput) + return &getPublishedAppIntegrationsOutput, err +} + +func (a *publishedAppIntegrationPreviewImpl) Update(ctx context.Context, request UpdatePublishedAppIntegration) error { + var updatePublishedAppIntegrationOutput UpdatePublishedAppIntegrationOutput + path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updatePublishedAppIntegrationOutput) + return err +} + +// unexported type that holds implementations of just ServicePrincipalFederationPolicyPreview API methods +type servicePrincipalFederationPolicyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *servicePrincipalFederationPolicyPreviewImpl) Create(ctx context.Context, request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) + if request.PolicyId != "" || slices.Contains(request.ForceSendFields, "PolicyId") { + queryParams["policy_id"] = request.PolicyId + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, 
http.MethodPost, path, headers, queryParams, request.Policy, &federationPolicy) + return &federationPolicy, err +} + +func (a *servicePrincipalFederationPolicyPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalFederationPolicyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *servicePrincipalFederationPolicyPreviewImpl) Get(ctx context.Context, request GetServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &federationPolicy) + return &federationPolicy, err +} + +// List service principal federation policies. +func (a *servicePrincipalFederationPolicyPreviewImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] { + + getNextPage := func(ctx context.Context, req ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { + return resp.Policies + } + getNextReq := func(resp *ListFederationPoliciesResponse) *ListServicePrincipalFederationPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List service principal federation policies. 
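+//
+// An illustrative sketch of consuming the paginated iterator directly,
+// assuming api is a *ServicePrincipalFederationPolicyPreviewAPI and the
+// service principal ID is a placeholder:
+//
+//	it := api.List(ctx, ListServicePrincipalFederationPoliciesRequest{
+//		ServicePrincipalId: 1234,
+//	})
+//	for it.HasNext(ctx) {
+//		policy, err := it.Next(ctx)
+//		if err != nil {
+//			break // or handle the error
+//		}
+//		fmt.Println(policy.Uid, policy.Name)
+//	}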
+func (a *servicePrincipalFederationPolicyPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FederationPolicy](ctx, iterator) +} +func (a *servicePrincipalFederationPolicyPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + var listFederationPoliciesResponse ListFederationPoliciesResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFederationPoliciesResponse) + return &listFederationPoliciesResponse, err +} + +func (a *servicePrincipalFederationPolicyPreviewImpl) Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { + var federationPolicy FederationPolicy + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) + if request.UpdateMask != "" || slices.Contains(request.ForceSendFields, "UpdateMask") { + queryParams["update_mask"] = request.UpdateMask + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &federationPolicy) + return &federationPolicy, err +} + +// unexported type that holds implementations of just ServicePrincipalSecretsPreview API methods +type servicePrincipalSecretsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *servicePrincipalSecretsPreviewImpl) Create(ctx context.Context, request CreateServicePrincipalSecretRequest) (*CreateServicePrincipalSecretResponse, error) { + var createServicePrincipalSecretResponse CreateServicePrincipalSecretResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &createServicePrincipalSecretResponse) + return &createServicePrincipalSecretResponse, err +} + +func (a *servicePrincipalSecretsPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalSecretRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.SecretId) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +// List service principal secrets. +// +// List all secrets associated with the given service principal. This operation +// only returns information about the secrets themselves and does not include +// the secret values. 
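+//
+// A minimal illustrative sketch of the create-then-list flow, assuming api is
+// a *ServicePrincipalSecretsPreviewAPI; the service principal ID is a
+// placeholder and error handling is elided. The secret value itself is only
+// returned once, by Create:
+//
+//	created, _ := api.Create(ctx, CreateServicePrincipalSecretRequest{
+//		ServicePrincipalId: 1234,
+//	})
+//	_ = created.Secret // capture now; List only ever returns metadata
+//	infos, _ := api.ListAll(ctx, ListServicePrincipalSecretsRequest{
+//		ServicePrincipalId: 1234,
+//	})
+//	for _, info := range infos {
+//		fmt.Println(info.Id, info.Status)
+//	}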
+func (a *servicePrincipalSecretsPreviewImpl) List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] { + + getNextPage := func(ctx context.Context, req ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalSecretsResponse) []SecretInfo { + return resp.Secrets + } + getNextReq := func(resp *ListServicePrincipalSecretsResponse) *ListServicePrincipalSecretsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List service principal secrets. +// +// List all secrets associated with the given service principal. This operation +// only returns information about the secrets themselves and does not include +// the secret values. +func (a *servicePrincipalSecretsPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SecretInfo](ctx, iterator) +} +func (a *servicePrincipalSecretsPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { + var listServicePrincipalSecretsResponse ListServicePrincipalSecretsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalSecretsResponse) + return &listServicePrincipalSecretsResponse, err +} diff --git a/oauth2/v2preview/model.go b/oauth2/v2preview/model.go new file mode 100755 index 000000000..0f54ddd1f --- /dev/null +++ b/oauth2/v2preview/model.go @@ -0,0 +1,708 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package oauth2preview + +import ( + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +// Create account federation policy +type CreateAccountFederationPolicyRequest struct { + Policy *FederationPolicy `json:"policy,omitempty"` + // The identifier for the federation policy. The identifier must contain + // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If + // unspecified, the id will be assigned by Databricks. + PolicyId string `json:"-" url:"policy_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAccountFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAccountFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCustomAppIntegration struct { + // This field indicates whether an OAuth client secret is required to + // authenticate this client. + Confidential bool `json:"confidential,omitempty"` + // Name of the custom OAuth app + Name string `json:"name,omitempty"` + // List of OAuth redirect urls + RedirectUrls []string `json:"redirect_urls,omitempty"` + // OAuth scopes granted to the application. Supported scopes: all-apis, sql, + // offline_access, openid, profile, email. 
+ Scopes []string `json:"scopes,omitempty"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented to by the end user to mint the + // access token. If the user does not authorize, the access token will not + // be minted. Must be a subset of scopes. + UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateCustomAppIntegration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateCustomAppIntegration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCustomAppIntegrationOutput struct { + // OAuth client-id generated by Databricks + ClientId string `json:"client_id,omitempty"` + // OAuth client-secret generated by Databricks. If this is a + // confidential OAuth app, a client-secret will be generated. + ClientSecret string `json:"client_secret,omitempty"` + // Unique integration id for the custom OAuth app + IntegrationId string `json:"integration_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateCustomAppIntegrationOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateCustomAppIntegrationOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePublishedAppIntegration struct { + // App id of the OAuth published app integration. For example power-bi, + // tableau-desktop + AppId string `json:"app_id,omitempty"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePublishedAppIntegration) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePublishedAppIntegration) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePublishedAppIntegrationOutput struct { + // Unique integration id for the published OAuth app + IntegrationId string `json:"integration_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePublishedAppIntegrationOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePublishedAppIntegrationOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Create service principal federation policy +type CreateServicePrincipalFederationPolicyRequest struct { + Policy *FederationPolicy `json:"policy,omitempty"` + // The identifier for the federation policy. The identifier must contain + // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If + // unspecified, the id will be assigned by Databricks. + PolicyId string `json:"-" url:"policy_id,omitempty"` + // The service principal id for the federation policy. + ServicePrincipalId int64 `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateServicePrincipalFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateServicePrincipalFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Create service principal secret +type CreateServicePrincipalSecretRequest struct { + // The service principal ID.
+ ServicePrincipalId int64 `json:"-" url:"-"` +} + +type CreateServicePrincipalSecretResponse struct { + // UTC time when the secret was created + CreateTime string `json:"create_time,omitempty"` + // ID of the secret + Id string `json:"id,omitempty"` + // Secret Value + Secret string `json:"secret,omitempty"` + // Secret Hash + SecretHash string `json:"secret_hash,omitempty"` + // Status of the secret + Status string `json:"status,omitempty"` + // UTC time when the secret was updated + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateServicePrincipalSecretResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateServicePrincipalSecretResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete account federation policy +type DeleteAccountFederationPolicyRequest struct { + // The identifier for the federation policy. + PolicyId string `json:"-" url:"-"` +} + +type DeleteCustomAppIntegrationOutput struct { +} + +// Delete Custom OAuth App Integration +type DeleteCustomAppIntegrationRequest struct { + IntegrationId string `json:"-" url:"-"` +} + +type DeletePublishedAppIntegrationOutput struct { +} + +// Delete Published OAuth App Integration +type DeletePublishedAppIntegrationRequest struct { + IntegrationId string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete service principal federation policy +type DeleteServicePrincipalFederationPolicyRequest struct { + // The identifier for the federation policy. + PolicyId string `json:"-" url:"-"` + // The service principal id for the federation policy. + ServicePrincipalId int64 `json:"-" url:"-"` +} + +// Delete service principal secret +type DeleteServicePrincipalSecretRequest struct { + // The secret ID. + SecretId string `json:"-" url:"-"` + // The service principal ID. + ServicePrincipalId int64 `json:"-" url:"-"` +} + +type FederationPolicy struct { + // Creation time of the federation policy. + CreateTime string `json:"create_time,omitempty"` + // Description of the federation policy. + Description string `json:"description,omitempty"` + // Resource name for the federation policy. Example values include + // `accounts/<account-id>/federationPolicies/my-federation-policy` for + // Account Federation Policies, and + // `accounts/<account-id>/servicePrincipals/<service-principal-id>/federationPolicies/my-federation-policy` + // for Service Principal Federation Policies. Typically an output parameter, + // which does not need to be specified in create or update requests. If + // specified in a request, must match the value in the request URL. + Name string `json:"name,omitempty"` + // Specifies the policy to use for validating OIDC claims in your federated + // tokens. + OidcPolicy *OidcFederationPolicy `json:"oidc_policy,omitempty"` + // Unique, immutable id of the federation policy. + Uid string `json:"uid,omitempty"` + // Last update time of the federation policy. + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FederationPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FederationPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get account federation policy +type GetAccountFederationPolicyRequest struct { + // The identifier for the federation policy.
+ PolicyId string `json:"-" url:"-"` +} + +type GetCustomAppIntegrationOutput struct { + // The client id of the custom OAuth app + ClientId string `json:"client_id,omitempty"` + // This field indicates whether an OAuth client secret is required to + // authenticate this client. + Confidential bool `json:"confidential,omitempty"` + + CreateTime string `json:"create_time,omitempty"` + + CreatedBy int64 `json:"created_by,omitempty"` + + CreatorUsername string `json:"creator_username,omitempty"` + // ID of this custom app + IntegrationId string `json:"integration_id,omitempty"` + // The display name of the custom OAuth app + Name string `json:"name,omitempty"` + // List of OAuth redirect urls + RedirectUrls []string `json:"redirect_urls,omitempty"` + + Scopes []string `json:"scopes,omitempty"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented by end user to mint the access + // token. If the user does not authorize the access token will not be + // minted. Must be a subset of scopes. + UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetCustomAppIntegrationOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetCustomAppIntegrationOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get OAuth Custom App Integration +type GetCustomAppIntegrationRequest struct { + // The OAuth app integration ID. + IntegrationId string `json:"-" url:"-"` +} + +type GetCustomAppIntegrationsOutput struct { + // List of Custom OAuth App Integrations defined for the account. + Apps []GetCustomAppIntegrationOutput `json:"apps,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetCustomAppIntegrationsOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetCustomAppIntegrationsOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetPublishedAppIntegrationOutput struct { + // App-id of the published app integration + AppId string `json:"app_id,omitempty"` + + CreateTime string `json:"create_time,omitempty"` + + CreatedBy int64 `json:"created_by,omitempty"` + // Unique integration id for the published OAuth app + IntegrationId string `json:"integration_id,omitempty"` + // Display name of the published OAuth app + Name string `json:"name,omitempty"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPublishedAppIntegrationOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedAppIntegrationOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get OAuth Published App Integration +type GetPublishedAppIntegrationRequest struct { + IntegrationId string `json:"-" url:"-"` +} + +type GetPublishedAppIntegrationsOutput struct { + // List of Published OAuth App Integrations defined for the account. 
+ Apps []GetPublishedAppIntegrationOutput `json:"apps,omitempty"` + + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPublishedAppIntegrationsOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedAppIntegrationsOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetPublishedAppsOutput struct { + // List of Published OAuth Apps. + Apps []PublishedAppOutput `json:"apps,omitempty"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPublishedAppsOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedAppsOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get service principal federation policy +type GetServicePrincipalFederationPolicyRequest struct { + // The identifier for the federation policy. + PolicyId string `json:"-" url:"-"` + // The service principal id for the federation policy. + ServicePrincipalId int64 `json:"-" url:"-"` +} + +// List account federation policies +type ListAccountFederationPoliciesRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAccountFederationPoliciesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAccountFederationPoliciesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get custom oauth app integrations +type ListCustomAppIntegrationsRequest struct { + IncludeCreatorUsername bool `json:"-" url:"include_creator_username,omitempty"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListCustomAppIntegrationsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListCustomAppIntegrationsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListFederationPoliciesResponse struct { + NextPageToken string `json:"next_page_token,omitempty"` + + Policies []FederationPolicy `json:"policies,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListFederationPoliciesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListFederationPoliciesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get all the published OAuth apps +type ListOAuthPublishedAppsRequest struct { + // The max number of OAuth published apps to return in one page. + PageSize int `json:"-" url:"page_size,omitempty"` + // A token that can be used to get the next page of results. 
+ PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListOAuthPublishedAppsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListOAuthPublishedAppsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get published oauth app integrations +type ListPublishedAppIntegrationsRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPublishedAppIntegrationsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPublishedAppIntegrationsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List service principal federation policies +type ListServicePrincipalFederationPoliciesRequest struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + // The service principal id for the federation policy. + ServicePrincipalId int64 `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalFederationPoliciesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalFederationPoliciesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List service principal secrets +type ListServicePrincipalSecretsRequest struct { + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the secrets for this service principal. + // Provide this token to retrieve the next page of secret entries. When + // providing a `page_token`, all other parameters provided to the request + // must match the previous request. To list all of the secrets for a service + // principal, it is necessary to continue requesting pages of entries until + // the response contains no `next_page_token`. Note that the number of + // entries returned must not be used to determine when the listing is + // complete. + PageToken string `json:"-" url:"page_token,omitempty"` + // The service principal ID. + ServicePrincipalId int64 `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalSecretsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalSecretsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListServicePrincipalSecretsResponse struct { + // A token, which can be sent as `page_token` to retrieve the next page. + NextPageToken string `json:"next_page_token,omitempty"` + // List of the secrets + Secrets []SecretInfo `json:"secrets,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListServicePrincipalSecretsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListServicePrincipalSecretsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Specifies the policy to use for validating OIDC claims in your federated +// tokens. +type OidcFederationPolicy struct { + // The allowed token audiences, as specified in the 'aud' claim of federated + // tokens. The audience identifier is intended to represent the recipient of + // the token. Can be any non-empty string value. As long as the audience in + // the token matches at least one audience in the policy, the token is + // considered a match. 
If audiences is unspecified, defaults to your + // Databricks account id. + Audiences []string `json:"audiences,omitempty"` + // The required token issuer, as specified in the 'iss' claim of federated + // tokens. + Issuer string `json:"issuer,omitempty"` + // The public keys used to validate the signature of federated tokens, in + // JWKS format. If unspecified (recommended), Databricks automatically + // fetches the public keys from your issuer’s well-known endpoint. + // Databricks strongly recommends relying on your issuer’s well-known + // endpoint for discovering public keys. + JwksJson string `json:"jwks_json,omitempty"` + // The required token subject, as specified in the subject claim of + // federated tokens. Must be specified for service principal federation + // policies. Must not be specified for account federation policies. + Subject string `json:"subject,omitempty"` + // The claim that contains the subject of the token. If unspecified, the + // default value is 'sub'. + SubjectClaim string `json:"subject_claim,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OidcFederationPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OidcFederationPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PublishedAppOutput struct { + // Unique ID of the published OAuth app. + AppId string `json:"app_id,omitempty"` + // Client ID of the published OAuth app. It is the client_id in the OAuth + // flow. + ClientId string `json:"client_id,omitempty"` + // Description of the published OAuth app. + Description string `json:"description,omitempty"` + // Whether the published OAuth app is a confidential client. It is always + // false for published OAuth apps. + IsConfidentialClient bool `json:"is_confidential_client,omitempty"` + // The display name of the published OAuth app. + Name string `json:"name,omitempty"` + // Redirect URLs of the published OAuth app. + RedirectUrls []string `json:"redirect_urls,omitempty"` + // Required scopes for the published OAuth app.
+ Scopes []string `json:"scopes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PublishedAppOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PublishedAppOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SecretInfo struct { + // UTC time when the secret was created + CreateTime string `json:"create_time,omitempty"` + // ID of the secret + Id string `json:"id,omitempty"` + // Secret Hash + SecretHash string `json:"secret_hash,omitempty"` + // Status of the secret + Status string `json:"status,omitempty"` + // UTC time when the secret was updated + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SecretInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SecretInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenAccessPolicy struct { + // access token time to live in minutes + AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"` + // refresh token time to live in minutes + RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenAccessPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenAccessPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Update account federation policy +type UpdateAccountFederationPolicyRequest struct { + Policy *FederationPolicy `json:"policy,omitempty"` + // The identifier for the federation policy. + PolicyId string `json:"-" url:"-"` + // The field mask specifies which fields of the policy to update. To specify + // multiple fields in the field mask, use comma as the separator (no space). + // The special value '*' indicates that all fields should be updated (full + // replacement). If unspecified, all fields that are set in the policy + // provided in the update request will overwrite the corresponding fields in + // the existing policy. Example value: 'description,oidc_policy.audiences'. + UpdateMask string `json:"-" url:"update_mask,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateAccountFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateAccountFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateCustomAppIntegration struct { + IntegrationId string `json:"-" url:"-"` + // List of OAuth redirect urls to be updated in the custom OAuth app + // integration + RedirectUrls []string `json:"redirect_urls,omitempty"` + // List of OAuth scopes to be updated in the custom OAuth app integration; + // similar to redirect URIs, this will fully replace the existing values + // instead of appending to them + Scopes []string `json:"scopes,omitempty"` + // Token access policy to be updated in the custom OAuth app integration + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented to by the end user to mint the + // access token. If the user does not authorize, the access token will not + // be minted. Must be a subset of scopes.
+ UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` +} + +type UpdateCustomAppIntegrationOutput struct { +} + +type UpdatePublishedAppIntegration struct { + IntegrationId string `json:"-" url:"-"` + // Token access policy to be updated in the published OAuth app integration + TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` +} + +type UpdatePublishedAppIntegrationOutput struct { +} + +// Update service principal federation policy +type UpdateServicePrincipalFederationPolicyRequest struct { + Policy *FederationPolicy `json:"policy,omitempty"` + // The identifier for the federation policy. + PolicyId string `json:"-" url:"-"` + // The service principal id for the federation policy. + ServicePrincipalId int64 `json:"-" url:"-"` + // The field mask specifies which fields of the policy to update. To specify + // multiple fields in the field mask, use comma as the separator (no space). + // The special value '*' indicates that all fields should be updated (full + // replacement). If unspecified, all fields that are set in the policy + // provided in the update request will overwrite the corresponding fields in + // the existing policy. Example value: 'description,oidc_policy.audiences'. + UpdateMask string `json:"-" url:"update_mask,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateServicePrincipalFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateServicePrincipalFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/pipelines/v2preview/api.go b/pipelines/v2preview/api.go new file mode 100755 index 000000000..83ba4affa --- /dev/null +++ b/pipelines/v2preview/api.go @@ -0,0 +1,304 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. +package pipelinespreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type PipelinesPreviewInterface interface { + + // Create a pipeline. + // + // Creates a new data processing pipeline based on the requested configuration. + // If successful, this method returns the ID of the new pipeline. + Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) + + // Delete a pipeline. + // + // Deletes a pipeline. + Delete(ctx context.Context, request DeletePipelineRequest) error + + // Delete a pipeline. + // + // Deletes a pipeline. + DeleteByPipelineId(ctx context.Context, pipelineId string) error + + // Get a pipeline. + Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error) + + // Get a pipeline. + GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error) + + // Get pipeline permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error) + + // Get pipeline permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error) + + // Get pipeline permissions. 
+ // + // Gets the permissions of a pipeline. Pipelines can inherit permissions from + // their root object. + GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error) + + // Get pipeline permissions. + // + // Gets the permissions of a pipeline. Pipelines can inherit permissions from + // their root object. + GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error) + + // Get a pipeline update. + // + // Gets an update from an active pipeline. + GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error) + + // Get a pipeline update. + // + // Gets an update from an active pipeline. + GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error) + + // List pipeline events. + // + // Retrieves events for a pipeline. + // + // This method is generated by Databricks SDK Code Generator. + ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] + + // List pipeline events. + // + // Retrieves events for a pipeline. + // + // This method is generated by Databricks SDK Code Generator. + ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) + + // List pipeline events. + // + // Retrieves events for a pipeline. + ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) + + // List pipelines. + // + // Lists pipelines defined in the Delta Live Tables system. + // + // This method is generated by Databricks SDK Code Generator. + ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] + + // List pipelines. + // + // Lists pipelines defined in the Delta Live Tables system. + // + // This method is generated by Databricks SDK Code Generator. + ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) + + // PipelineStateInfoNameToPipelineIdMap calls [PipelinesPreviewAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. + // + // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. + // + // Note: All [PipelineStateInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error) + + // GetByName calls [PipelinesPreviewAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. + // + // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. + // + // Note: All [PipelineStateInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*PipelineStateInfo, error) + + // List pipeline updates. + // + // List updates for an active pipeline. + ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error) + + // List pipeline updates. + // + // List updates for an active pipeline. + ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error) + + // Set pipeline permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. 
+ // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) + + // Start a pipeline. + // + // Starts a new update for the pipeline. If there is already an active update + // for the pipeline, the request will fail and the active update will remain + // running. + StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error) + + // Stop a pipeline. + // + // Stops the pipeline by canceling the active update. If there is no active + // update for the pipeline, this request is a no-op. + Stop(ctx context.Context, request StopRequest) error + + // Edit a pipeline. + // + // Updates a pipeline with the supplied configuration. + Update(ctx context.Context, request EditPipeline) error + + // Update pipeline permissions. + // + // Updates the permissions on a pipeline. Pipelines can inherit permissions from + // their root object. + UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) +} + +func NewPipelinesPreview(client *client.DatabricksClient) *PipelinesPreviewAPI { + return &PipelinesPreviewAPI{ + pipelinesPreviewImpl: pipelinesPreviewImpl{ + client: client, + }, + } +} + +// The Delta Live Tables API allows you to create, edit, delete, start, and view +// details about pipelines. +// +// Delta Live Tables is a framework for building reliable, maintainable, and +// testable data processing pipelines. You define the transformations to perform +// on your data, and Delta Live Tables manages task orchestration, cluster +// management, monitoring, data quality, and error handling. +// +// Instead of defining your data pipelines using a series of separate Apache +// Spark tasks, Delta Live Tables manages how your data is transformed based on +// a target schema you define for each processing step. You can also enforce +// data quality with Delta Live Tables expectations. Expectations allow you to +// define expected data quality and specify how to handle records that fail +// those expectations. +type PipelinesPreviewAPI struct { + pipelinesPreviewImpl +} + +// Delete a pipeline. +// +// Deletes a pipeline. +func (a *PipelinesPreviewAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error { + return a.pipelinesPreviewImpl.Delete(ctx, DeletePipelineRequest{ + PipelineId: pipelineId, + }) +} + +// Get a pipeline. +func (a *PipelinesPreviewAPI) GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error) { + return a.pipelinesPreviewImpl.Get(ctx, GetPipelineRequest{ + PipelineId: pipelineId, + }) +} + +// Get pipeline permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *PipelinesPreviewAPI) GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error) { + return a.pipelinesPreviewImpl.GetPermissionLevels(ctx, GetPipelinePermissionLevelsRequest{ + PipelineId: pipelineId, + }) +} + +// Get pipeline permissions. +// +// Gets the permissions of a pipeline. Pipelines can inherit permissions from +// their root object. +func (a *PipelinesPreviewAPI) GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error) { + return a.pipelinesPreviewImpl.GetPermissions(ctx, GetPipelinePermissionsRequest{ + PipelineId: pipelineId, + }) +} + +// Get a pipeline update. 
+// +// Gets an update from an active pipeline. +func (a *PipelinesPreviewAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error) { + return a.pipelinesPreviewImpl.GetUpdate(ctx, GetUpdateRequest{ + PipelineId: pipelineId, + UpdateId: updateId, + }) +} + +// List pipeline events. +// +// Retrieves events for a pipeline. +func (a *PipelinesPreviewAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) { + return a.pipelinesPreviewImpl.internalListPipelineEvents(ctx, ListPipelineEventsRequest{ + PipelineId: pipelineId, + }) +} + +// PipelineStateInfoNameToPipelineIdMap calls [PipelinesPreviewAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. +// +// Returns an error if there's more than one [PipelineStateInfo] with the same .Name. +// +// Note: All [PipelineStateInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *PipelinesPreviewAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListPipelinesAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.PipelineId + } + return mapping, nil +} + +// GetByName calls [PipelinesPreviewAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. +// +// Returns an error if there's more than one [PipelineStateInfo] with the same .Name. +// +// Note: All [PipelineStateInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *PipelinesPreviewAPI) GetByName(ctx context.Context, name string) (*PipelineStateInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListPipelinesAll(ctx, ListPipelinesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]PipelineStateInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("PipelineStateInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of PipelineStateInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// List pipeline updates. +// +// List updates for an active pipeline. +func (a *PipelinesPreviewAPI) ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error) { + return a.pipelinesPreviewImpl.ListUpdates(ctx, ListUpdatesRequest{ + PipelineId: pipelineId, + }) +} diff --git a/pipelines/v2preview/client.go b/pipelines/v2preview/client.go new file mode 100755 index 000000000..f3cb34b01 --- /dev/null +++ b/pipelines/v2preview/client.go @@ -0,0 +1,45 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
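A minimal end-to-end sketch of the workspace-level client defined below; it assumes host and credentials resolve from the environment (for example DATABRICKS_HOST and DATABRICKS_TOKEN) and that the import path mirrors the directory layout, and the pipeline ID is a placeholder:

	package main

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sdk-go/databricks/config"
		pipelinespreview "github.com/databricks/databricks-sdk-go/pipelines/v2preview"
	)

	func main() {
		ctx := context.Background()
		// An empty Config is resolved from the environment via EnsureResolved.
		w, err := pipelinespreview.NewPipelinesPreviewClient(&config.Config{})
		if err != nil {
			panic(err)
		}
		p, err := w.GetByPipelineId(ctx, "1234-567890-abcdef") // placeholder ID
		if err != nil {
			panic(err)
		}
		fmt.Println(p.Name, p.State)
	}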
+ +package pipelinespreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type PipelinesPreviewClient struct { + PipelinesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewPipelinesPreviewClient(cfg *config.Config) (*PipelinesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &PipelinesPreviewClient{ + Config: cfg, + apiClient: apiClient, + PipelinesPreviewInterface: NewPipelinesPreview(databricksClient), + }, nil +} diff --git a/pipelines/v2preview/impl.go b/pipelines/v2preview/impl.go new file mode 100755 index 000000000..28ddfcf8c --- /dev/null +++ b/pipelines/v2preview/impl.go @@ -0,0 +1,233 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package pipelinespreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just PipelinesPreview API methods +type pipelinesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *pipelinesPreviewImpl) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) { + var createPipelineResponse CreatePipelineResponse + path := "/api/2.0preview/pipelines" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPipelineResponse) + return &createPipelineResponse, err +} + +func (a *pipelinesPreviewImpl) Delete(ctx context.Context, request DeletePipelineRequest) error { + var deletePipelineResponse DeletePipelineResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deletePipelineResponse) + return err +} + +func (a *pipelinesPreviewImpl) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error) { + var getPipelineResponse GetPipelineResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPipelineResponse) + return &getPipelineResponse, err +} + +func (a *pipelinesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error) { + var getPipelinePermissionLevelsResponse GetPipelinePermissionLevelsResponse + path := 
fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v/permissionLevels", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPipelinePermissionLevelsResponse) + return &getPipelinePermissionLevelsResponse, err +} + +func (a *pipelinesPreviewImpl) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error) { + var pipelinePermissions PipelinePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &pipelinePermissions) + return &pipelinePermissions, err +} + +func (a *pipelinesPreviewImpl) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error) { + var getUpdateResponse GetUpdateResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates/%v", request.PipelineId, request.UpdateId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getUpdateResponse) + return &getUpdateResponse, err +} + +// List pipeline events. +// +// Retrieves events for a pipeline. +func (a *pipelinesPreviewImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] { + + getNextPage := func(ctx context.Context, req ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPipelineEvents(ctx, req) + } + getItems := func(resp *ListPipelineEventsResponse) []PipelineEvent { + return resp.Events + } + getNextReq := func(resp *ListPipelineEventsResponse) *ListPipelineEventsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List pipeline events. +// +// Retrieves events for a pipeline. +func (a *pipelinesPreviewImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { + iterator := a.ListPipelineEvents(ctx, request) + return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) + +} +func (a *pipelinesPreviewImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { + var listPipelineEventsResponse ListPipelineEventsResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v/events", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPipelineEventsResponse) + return &listPipelineEventsResponse, err +} + +// List pipelines. +// +// Lists pipelines defined in the Delta Live Tables system. 
+func (a *pipelinesPreviewImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { + + getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPipelines(ctx, req) + } + getItems := func(resp *ListPipelinesResponse) []PipelineStateInfo { + return resp.Statuses + } + getNextReq := func(resp *ListPipelinesResponse) *ListPipelinesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List pipelines. +// +// Lists pipelines defined in the Delta Live Tables system. +func (a *pipelinesPreviewImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { + iterator := a.ListPipelines(ctx, request) + return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) + +} +func (a *pipelinesPreviewImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { + var listPipelinesResponse ListPipelinesResponse + path := "/api/2.0preview/pipelines" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPipelinesResponse) + return &listPipelinesResponse, err +} + +func (a *pipelinesPreviewImpl) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error) { + var listUpdatesResponse ListUpdatesResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUpdatesResponse) + return &listUpdatesResponse, err +} + +func (a *pipelinesPreviewImpl) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { + var pipelinePermissions PipelinePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &pipelinePermissions) + return &pipelinePermissions, err +} + +func (a *pipelinesPreviewImpl) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error) { + var startUpdateResponse StartUpdateResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &startUpdateResponse) + return &startUpdateResponse, err +} + +func (a *pipelinesPreviewImpl) Stop(ctx context.Context, request StopRequest) error { + var stopPipelineResponse StopPipelineResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v/stop", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := 
a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &stopPipelineResponse) + return err +} + +func (a *pipelinesPreviewImpl) Update(ctx context.Context, request EditPipeline) error { + var editPipelineResponse EditPipelineResponse + path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &editPipelineResponse) + return err +} + +func (a *pipelinesPreviewImpl) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { + var pipelinePermissions PipelinePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &pipelinePermissions) + return &pipelinePermissions, err +} diff --git a/pipelines/v2preview/model.go b/pipelines/v2preview/model.go new file mode 100755 index 000000000..12fccfc28 --- /dev/null +++ b/pipelines/v2preview/model.go @@ -0,0 +1,2321 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package pipelinespreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type Adlsgen2Info struct { + // abfss destination, e.g. + // `abfss://@.dfs.core.windows.net/`. + Destination string `json:"destination"` +} + +type AwsAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. + // + // Note: If `first_on_demand` is zero, this availability type will be used + // for the entire cluster. + Availability AwsAvailability `json:"availability,omitempty"` + // The number of volumes launched for each instance. Users can choose up to + // 10 volumes. This feature is only enabled for supported node types. Legacy + // node types cannot specify custom EBS volumes. For node types with no + // instance store, at least one EBS volume needs to be specified; otherwise, + // cluster creation will fail. + // + // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, etc. Instance + // store volumes will be mounted at `/local_disk0`, `/local_disk1`, etc. + // + // If EBS volumes are attached, Databricks will configure Spark to use only + // the EBS volumes for scratch storage because heterogeneously sized scratch + // devices can lead to inefficient disk utilization. If no EBS volumes are + // attached, Databricks will configure Spark to use instance store volumes. + // + // Please note that if EBS volumes are specified, then the Spark + // configuration `spark.local.dir` will be overridden. + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + // If using gp3 volumes, what IOPS to use for the disk. If this is not set, + // the maximum performance of a gp2 volume with the same volume size will be + // used. + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` + // The size of each EBS volume (in GiB) launched for each instance. For + // general purpose SSD, this value must be within the range 100 - 4096. For + // throughput optimized HDD, this value must be within the range 500 - 4096.
+ EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + // If using gp3 volumes, what throughput to use for the disk. If this is not + // set, the maximum performance of a gp2 volume with the same volume size + // will be used. + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` + // The type of EBS volumes that will be launched with this cluster. + EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. If this value is greater than 0, the cluster driver + // node in particular will be placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand int `json:"first_on_demand,omitempty"` + // Nodes for this cluster will only be placed on AWS instances with this + // instance profile. If omitted, nodes will be placed on instances without + // an IAM instance profile. The instance profile must have previously been + // added to the Databricks environment by an account administrator. + // + // This feature may only be available to certain customer plans. + // + // If this field is omitted, we will pull in the default from the conf if + // it exists. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The bid price for AWS spot instances, as a percentage of the + // corresponding instance type's on-demand price. For example, if this field + // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then + // the bid price is half of the price of on-demand `r3.xlarge` instances. + // Similarly, if this field is set to 200, the bid price is twice the price + // of on-demand `r3.xlarge` instances. If not specified, the default value + // is 100. When spot instances are requested for this cluster, only spot + // instances whose bid price percentage matches this field will be + // considered. Note that, for safety, we enforce this field to be no more + // than 10000. + // + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidPricePercent and + // CommonConf.maxSpotBidPricePercent. + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + // Identifier for the availability zone/datacenter in which the cluster + // resides. This string will be of a form like "us-west-2a". The provided + // availability zone must be in the same region as the Databricks + // deployment. For example, "us-west-2a" is not a valid zone id if the + // Databricks deployment resides in the "us-east-1" region. This is an + // optional field at cluster creation, and if not specified, a default zone + // will be used. If the zone specified is "auto", Databricks will try to + // place the cluster in a zone with high availability, and will retry + // placement in a different AZ if there is not enough capacity. The list of + // available zones as well as the default value can be found by using the + // `List Zones` method.
+ ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AwsAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AwsAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. +// +// Note: If `first_on_demand` is zero, this availability type will be used for +// the entire cluster. +type AwsAvailability string + +const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND` + +const AwsAvailabilitySpot AwsAvailability = `SPOT` + +const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK` + +// String representation for [fmt.Print] +func (f *AwsAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AwsAvailability) Set(v string) error { + switch v { + case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`: + *f = AwsAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v) + } +} + +// Type always returns AwsAvailability to satisfy [pflag.Value] interface +func (f *AwsAvailability) Type() string { + return "AwsAvailability" +} + +type AzureAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only + // happens on pool clusters), this availability type will be used for the + // entire cluster. + Availability AzureAvailability `json:"availability,omitempty"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. This value should be greater than 0, to make sure + // the cluster driver node is placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand int `json:"first_on_demand,omitempty"` + // Defines values necessary to configure and run Azure Log Analytics agent + LogAnalyticsInfo *LogAnalyticsInfo `json:"log_analytics_info,omitempty"` + // The max bid price to be used for Azure spot instances. The Max price for + // the bid cannot be higher than the on-demand price of the instance. If not + // specified, the default value is -1, which specifies that the instance + // cannot be evicted on the basis of price, and only on the basis of + // availability. Further, the value should be > 0 or -1. + SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. Note: If `first_on_demand` is zero (which only happens on pool +// clusters), this availability type will be used for the entire cluster.
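The String/Set/Type methods above satisfy the [pflag.Value] interface referenced in the comments, so these enum types can back command-line flags directly. A sketch, assuming github.com/spf13/pflag and an illustrative flag name:

	var avail AwsAvailability
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Var(&avail, "aws-availability", "ON_DEMAND, SPOT or SPOT_WITH_FALLBACK")
	// Set rejects values outside the allowed list, so invalid input fails at parse time.
	err := fs.Parse([]string{"--aws-availability", "SPOT"})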
+type AzureAvailability string + +const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` + +const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE` + +const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE` + +// String representation for [fmt.Print] +func (f *AzureAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AzureAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`: + *f = AzureAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v) + } +} + +// Type always returns AzureAvailability to satisfy [pflag.Value] interface +func (f *AzureAvailability) Type() string { + return "AzureAvailability" +} + +type ClusterLogConf struct { + // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` The cluster IAM role is used to access S3; please make + // sure the cluster IAM role in `instance_profile_arn` has permission to + // write data to the S3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/catalog/schema/volume/cluster_log" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` +} + +type CreatePipeline struct { + // If false, deployment will fail if name conflicts with that of another + // pipeline. + AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` + // Budget policy of this pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog string `json:"catalog,omitempty"` + // DLT Release Channel that specifies which version to use. + Channel string `json:"channel,omitempty"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `json:"clusters,omitempty"` + // String-String configuration for this pipeline execution. + Configuration map[string]string `json:"configuration,omitempty"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` + // Whether the pipeline is in Development mode. Defaults to false. + Development bool `json:"development,omitempty"` + + DryRun bool `json:"dry_run,omitempty"` + // Pipeline product edition. + Edition string `json:"edition,omitempty"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `json:"filters,omitempty"` + // The definition of a gateway pipeline to support change data capture. + GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"` + // Unique identifier for this pipeline. + Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline.
These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `json:"libraries,omitempty"` + // Friendly identifier for this pipeline. + Name string `json:"name,omitempty"` + // List of notification settings for this pipeline. + Notifications []Notifications `json:"notifications,omitempty"` + // Whether Photon is enabled for this pipeline. + Photon bool `json:"photon,omitempty"` + // Restart window of this pipeline. + RestartWindow *RestartWindow `json:"restart_window,omitempty"` + // Write-only setting, available only in Create/Update calls. Specifies the + // user or service principal that the pipeline runs as. If not specified, + // the pipeline runs as the user who created the pipeline. + // + // Only `user_name` or `service_principal_name` can be specified. If both + // are specified, an error is thrown. + RunAs *RunAs `json:"run_as,omitempty"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema string `json:"schema,omitempty"` + // Whether serverless compute is enabled for this pipeline. + Serverless bool `json:"serverless,omitempty"` + // DBFS root directory for storing checkpoints and tables. + Storage string `json:"storage,omitempty"` + // Target schema (database) to add tables in this pipeline to. If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target string `json:"target,omitempty"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `json:"trigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePipeline) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePipeline) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePipelineResponse struct { + // Only returned when dry_run is true. + EffectiveSettings *PipelineSpec `json:"effective_settings,omitempty"` + // The unique identifier for the newly created pipeline. Only returned when + // dry_run is false. + PipelineId string `json:"pipeline_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreatePipelineResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreatePipelineResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CronTrigger struct { + QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"` + + TimezoneId string `json:"timezone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CronTrigger) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CronTrigger) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DataPlaneId struct { + // The instance name of the data plane emitting an event. + Instance string `json:"instance,omitempty"` + // A sequence number, unique and increasing within the data plane instance. 
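The CreatePipeline payload above maps directly onto the Create call; a minimal serverless sketch, reusing the client `w` from the client.go example. The name, catalog, schema, and notebook path are placeholders, and the PipelineLibrary/NotebookLibrary shapes are assumed from the rest of this model:

	created, err := w.Create(ctx, CreatePipeline{
		Name:       "my-pipeline",
		Serverless: true,
		Catalog:    "main",    // publish tables to Unity Catalog
		Schema:     "default", // direct publishing mode
		Libraries: []PipelineLibrary{
			{Notebook: &NotebookLibrary{Path: "/Users/someone@example.com/dlt_notebook"}},
		},
	})
	// On success, created.PipelineId identifies the new pipeline.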
+ SeqNo int `json:"seq_no,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DataPlaneId) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DataPlaneId) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Days of week in which the restart is allowed to happen (within a five-hour +// window starting at start_hour). If not specified, all days of the week will +// be used. +type DayOfWeek string + +const DayOfWeekFriday DayOfWeek = `FRIDAY` + +const DayOfWeekMonday DayOfWeek = `MONDAY` + +const DayOfWeekSaturday DayOfWeek = `SATURDAY` + +const DayOfWeekSunday DayOfWeek = `SUNDAY` + +const DayOfWeekThursday DayOfWeek = `THURSDAY` + +const DayOfWeekTuesday DayOfWeek = `TUESDAY` + +const DayOfWeekWednesday DayOfWeek = `WEDNESDAY` + +// String representation for [fmt.Print] +func (f *DayOfWeek) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DayOfWeek) Set(v string) error { + switch v { + case `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY`: + *f = DayOfWeek(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"`, v) + } +} + +// Type always returns DayOfWeek to satisfy [pflag.Value] interface +func (f *DayOfWeek) Type() string { + return "DayOfWeek" +} + +type DbfsStorageInfo struct { + // dbfs destination, e.g. `dbfs:/my/path` + Destination string `json:"destination"` +} + +// Delete a pipeline +type DeletePipelineRequest struct { + PipelineId string `json:"-" url:"-"` +} + +type DeletePipelineResponse struct { +} + +// The deployment method that manages the pipeline: - BUNDLE: The pipeline is +// managed by a Databricks Asset Bundle. +type DeploymentKind string + +const DeploymentKindBundle DeploymentKind = `BUNDLE` + +// String representation for [fmt.Print] +func (f *DeploymentKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeploymentKind) Set(v string) error { + switch v { + case `BUNDLE`: + *f = DeploymentKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUNDLE"`, v) + } +} + +// Type always returns DeploymentKind to satisfy [pflag.Value] interface +func (f *DeploymentKind) Type() string { + return "DeploymentKind" +} + +// The type of EBS volumes that will be launched with this cluster. +type EbsVolumeType string + +const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD` + +const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *EbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = EbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns EbsVolumeType to satisfy [pflag.Value] interface +func (f *EbsVolumeType) Type() string { + return "EbsVolumeType" +} + +type EditPipeline struct { + // If false, deployment will fail if name has changed and conflicts with the + // name of another pipeline. + AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` + // Budget policy of this pipeline.
+ BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog string `json:"catalog,omitempty"` + // DLT Release Channel that specifies which version to use. + Channel string `json:"channel,omitempty"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `json:"clusters,omitempty"` + // String-String configuration for this pipeline execution. + Configuration map[string]string `json:"configuration,omitempty"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` + // Whether the pipeline is in Development mode. Defaults to false. + Development bool `json:"development,omitempty"` + // Pipeline product edition. + Edition string `json:"edition,omitempty"` + // If present, the last-modified time of the pipeline settings before the + // edit. If the settings were modified after that time, then the request + // will fail with a conflict. + ExpectedLastModified int64 `json:"expected_last_modified,omitempty"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `json:"filters,omitempty"` + // The definition of a gateway pipeline to support change data capture. + GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"` + // Unique identifier for this pipeline. + Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `json:"libraries,omitempty"` + // Friendly identifier for this pipeline. + Name string `json:"name,omitempty"` + // List of notification settings for this pipeline. + Notifications []Notifications `json:"notifications,omitempty"` + // Whether Photon is enabled for this pipeline. + Photon bool `json:"photon,omitempty"` + // Unique identifier for this pipeline. + PipelineId string `json:"pipeline_id,omitempty" url:"-"` + // Restart window of this pipeline. + RestartWindow *RestartWindow `json:"restart_window,omitempty"` + // Write-only setting, available only in Create/Update calls. Specifies the + // user or service principal that the pipeline runs as. If not specified, + // the pipeline runs as the user who created the pipeline. + // + // Only `user_name` or `service_principal_name` can be specified. If both + // are specified, an error is thrown. + RunAs *RunAs `json:"run_as,omitempty"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema string `json:"schema,omitempty"` + // Whether serverless compute is enabled for this pipeline. + Serverless bool `json:"serverless,omitempty"` + // DBFS root directory for storing checkpoints and tables. + Storage string `json:"storage,omitempty"` + // Target schema (database) to add tables in this pipeline to. 
If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target string `json:"target,omitempty"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `json:"trigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EditPipeline) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EditPipeline) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EditPipelineResponse struct { +} + +type ErrorDetail struct { + // The exception thrown for this error, with its chain of causes. + Exceptions []SerializedException `json:"exceptions,omitempty"` + // Whether this error is considered fatal, that is, unrecoverable. + Fatal bool `json:"fatal,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ErrorDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ErrorDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The severity level of the event. +type EventLevel string + +const EventLevelError EventLevel = `ERROR` + +const EventLevelInfo EventLevel = `INFO` + +const EventLevelMetrics EventLevel = `METRICS` + +const EventLevelWarn EventLevel = `WARN` + +// String representation for [fmt.Print] +func (f *EventLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventLevel) Set(v string) error { + switch v { + case `ERROR`, `INFO`, `METRICS`, `WARN`: + *f = EventLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ERROR", "INFO", "METRICS", "WARN"`, v) + } +} + +// Type always returns EventLevel to satisfy [pflag.Value] interface +func (f *EventLevel) Type() string { + return "EventLevel" +} + +type FileLibrary struct { + // The absolute path of the file. + Path string `json:"path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FileLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FileLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Filters struct { + // Paths to exclude. + Exclude []string `json:"exclude,omitempty"` + // Paths to include. + Include []string `json:"include,omitempty"` +} + +type GcpAttributes struct { + // This field determines whether the instance pool will contain preemptible + // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs + // if the former is unavailable. + Availability GcpAvailability `json:"availability,omitempty"` + // boot disk size in GB + BootDiskSize int `json:"boot_disk_size,omitempty"` + // If provided, the cluster will impersonate the Google service account when + // accessing gcloud services (like GCS). The Google service account must + // have previously been added to the Databricks environment by an account + // administrator. + GoogleServiceAccount string `json:"google_service_account,omitempty"` + // If provided, each node (workers and driver) in the cluster will have this + // number of local SSDs attached. Each local SSD is 375GB in size. Refer to + // [GCP documentation] for the supported number of local SSDs for each + // instance type.
+ // + // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds + LocalSsdCount int `json:"local_ssd_count,omitempty"` + // This field determines whether the Spark executors will be scheduled to + // run on preemptible VMs (when set to true) versus standard Compute Engine + // VMs (when set to false; default). Note: Soon to be deprecated, use the + // availability field instead. + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + // Identifier for the availability zone in which the cluster resides. This + // can be one of the following: - "HA" => High availability, spread nodes + // across availability zones for a Databricks deployment region [default] - + // "AUTO" => Databricks picks an availability zone to schedule the cluster + // on. - A GCP availability zone => pick one of the available zones for + // (machine type + region) from + // https://cloud.google.com/compute/docs/regions-zones. + ZoneId string `json:"zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// This field determines whether the instance pool will contain preemptible VMs, +// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the +// former is unavailable. +type GcpAvailability string + +const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP` + +const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP` + +const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP` + +// String representation for [fmt.Print] +func (f *GcpAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GcpAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`: + *f = GcpAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v) + } +} + +// Type always returns GcpAvailability to satisfy [pflag.Value] interface +func (f *GcpAvailability) Type() string { + return "GcpAvailability" +} + +type GcsStorageInfo struct { + // GCS destination/URI, e.g. `gs://my-bucket/some-prefix` + Destination string `json:"destination"` +} + +// Get pipeline permission levels +type GetPipelinePermissionLevelsRequest struct { + // The pipeline for which to get or manage permissions. + PipelineId string `json:"-" url:"-"` +} + +type GetPipelinePermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []PipelinePermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get pipeline permissions +type GetPipelinePermissionsRequest struct { + // The pipeline for which to get or manage permissions. + PipelineId string `json:"-" url:"-"` +} + +// Get a pipeline +type GetPipelineRequest struct { + PipelineId string `json:"-" url:"-"` +} + +type GetPipelineResponse struct { + // An optional message detailing the cause of the pipeline state. + Cause string `json:"cause,omitempty"` + // The ID of the cluster that the pipeline is running on. + ClusterId string `json:"cluster_id,omitempty"` + // The username of the pipeline creator. + CreatorUserName string `json:"creator_user_name,omitempty"` + // Serverless budget policy ID of this pipeline.
+ EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` + // The health of a pipeline. + Health GetPipelineResponseHealth `json:"health,omitempty"` + // The last time the pipeline settings were modified or created. + LastModified int64 `json:"last_modified,omitempty"` + // Status of the latest updates for the pipeline. Ordered with the newest + // update first. + LatestUpdates []UpdateStateInfo `json:"latest_updates,omitempty"` + // A human friendly identifier for the pipeline, taken from the `spec`. + Name string `json:"name,omitempty"` + // The ID of the pipeline. + PipelineId string `json:"pipeline_id,omitempty"` + // Username of the user that the pipeline will run on behalf of. + RunAsUserName string `json:"run_as_user_name,omitempty"` + // The pipeline specification. This field is not returned when called by + // `ListPipelines`. + Spec *PipelineSpec `json:"spec,omitempty"` + // The pipeline state. + State PipelineState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetPipelineResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPipelineResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The health of a pipeline. +type GetPipelineResponseHealth string + +const GetPipelineResponseHealthHealthy GetPipelineResponseHealth = `HEALTHY` + +const GetPipelineResponseHealthUnhealthy GetPipelineResponseHealth = `UNHEALTHY` + +// String representation for [fmt.Print] +func (f *GetPipelineResponseHealth) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetPipelineResponseHealth) Set(v string) error { + switch v { + case `HEALTHY`, `UNHEALTHY`: + *f = GetPipelineResponseHealth(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "HEALTHY", "UNHEALTHY"`, v) + } +} + +// Type always returns GetPipelineResponseHealth to satisfy [pflag.Value] interface +func (f *GetPipelineResponseHealth) Type() string { + return "GetPipelineResponseHealth" +} + +// Get a pipeline update +type GetUpdateRequest struct { + // The ID of the pipeline. + PipelineId string `json:"-" url:"-"` + // The ID of the update. + UpdateId string `json:"-" url:"-"` +} + +type GetUpdateResponse struct { + // The current update info. + Update *UpdateInfo `json:"update,omitempty"` +} + +type IngestionConfig struct { + // Select a specific source report. + Report *ReportSpec `json:"report,omitempty"` + // Select all tables from a specific source schema. + Schema *SchemaSpec `json:"schema,omitempty"` + // Select a specific source table. + Table *TableSpec `json:"table,omitempty"` +} + +type IngestionGatewayPipelineDefinition struct { + // [Deprecated, use connection_name instead] Immutable. The Unity Catalog + // connection that this gateway pipeline uses to communicate with the + // source. + ConnectionId string `json:"connection_id,omitempty"` + // Immutable. The Unity Catalog connection that this gateway pipeline uses + // to communicate with the source. + ConnectionName string `json:"connection_name,omitempty"` + // Required, Immutable. The name of the catalog for the gateway pipeline's + // storage location. + GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` + // Optional. The Unity Catalog-compatible name for the gateway storage + // location. This is the destination to use for the data that is extracted + // by the gateway. 
The Delta Live Tables system will automatically create the + // storage location under the catalog and schema. + GatewayStorageName string `json:"gateway_storage_name,omitempty"` + // Required, Immutable. The name of the schema for the gateway pipeline's + // storage location. + GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *IngestionGatewayPipelineDefinition) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s IngestionGatewayPipelineDefinition) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type IngestionPipelineDefinition struct { + // Immutable. The Unity Catalog connection that this ingestion pipeline uses + // to communicate with the source. This is used with connectors for + // applications like Salesforce, Workday, and so on. + ConnectionName string `json:"connection_name,omitempty"` + // Immutable. Identifier for the gateway that is used by this ingestion + // pipeline to communicate with the source database. This is used with + // connectors to databases like SQL Server. + IngestionGatewayId string `json:"ingestion_gateway_id,omitempty"` + // Required. Settings specifying tables to replicate and the destination for + // the replicated tables. + Objects []IngestionConfig `json:"objects,omitempty"` + // Configuration settings to control the ingestion of tables. These settings + // are applied to all tables in the pipeline. + TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *IngestionPipelineDefinition) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s IngestionPipelineDefinition) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type InitScriptInfo struct { + // destination needs to be provided. e.g. `{ "abfss" : { "destination" : + // "abfss://@.dfs.core.windows.net/" + // } }` + Abfss *Adlsgen2Info `json:"abfss,omitempty"` + // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` + // destination needs to be provided. e.g. `{ "file" : { "destination" : + // "file:/my/local/file.sh" } }` + File *LocalFileInfo `json:"file,omitempty"` + // destination needs to be provided. e.g. `{ "gcs": { "destination": + // "gs://my-bucket/file.sh" } }` + Gcs *GcsStorageInfo `json:"gcs,omitempty"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` The cluster IAM role is used to access S3; please make + // sure the cluster IAM role in `instance_profile_arn` has permission to + // write data to the S3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/my-init.sh" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` + // destination needs to be provided. e.g. `{ "workspace" : { "destination" : + // "/Users/user1@databricks.com/my-init.sh" } }` + Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"` +} + +// List pipeline events +type ListPipelineEventsRequest struct { + // Criteria to select a subset of results, expressed using a SQL-like + // syntax. The supported filters are: 1. level='INFO' (or WARN or ERROR) 2. + // level in ('INFO', 'WARN') 3. id='[event-id]' 4.
timestamp > 'TIMESTAMP' + // (or >=,<,<=,=) + // + // Composite expressions are supported, for example: level in ('ERROR', + // 'WARN') AND timestamp > '2021-07-22T06:37:33.083Z' + Filter string `json:"-" url:"filter,omitempty"` + // Max number of entries to return in a single page. The system may return + // fewer than max_results events in a response, even if there are more + // events available. + MaxResults int `json:"-" url:"max_results,omitempty"` + // A string indicating a sort order by timestamp for the results, for + // example, ["timestamp asc"]. The sort order can be ascending or + // descending. By default, events are returned in descending order by + // timestamp. + OrderBy []string `json:"-" url:"order_by,omitempty"` + // Page token returned by previous call. This field is mutually exclusive + // with all fields in this request except max_results. An error is returned + // if any fields other than max_results are set when this field is set. + PageToken string `json:"-" url:"page_token,omitempty"` + + PipelineId string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPipelineEventsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPipelineEventsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListPipelineEventsResponse struct { + // The list of events matching the request criteria. + Events []PipelineEvent `json:"events,omitempty"` + // If present, a token to fetch the next page of events. + NextPageToken string `json:"next_page_token,omitempty"` + // If present, a token to fetch the previous page of events. + PrevPageToken string `json:"prev_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPipelineEventsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPipelineEventsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List pipelines +type ListPipelinesRequest struct { + // Select a subset of results based on the specified criteria. The supported + // filters are: + // + // * `notebook=''` to select pipelines that reference the provided + // notebook path. * `name LIKE '[pattern]'` to select pipelines with a name + // that matches pattern. Wildcards are supported, for example: `name LIKE + // '%shopping%'` + // + // Composite filters are not supported. This field is optional. + Filter string `json:"-" url:"filter,omitempty"` + // The maximum number of entries to return in a single page. The system may + // return fewer than max_results pipelines in a response, even if there are + // more pipelines available. This field is optional. The default value is 25. + // The maximum value is 100. An error is returned if the value of + // max_results is greater than 100. + MaxResults int `json:"-" url:"max_results,omitempty"` + // A list of strings specifying the order of results. Supported order_by + // fields are id and name. The default is id asc. This field is optional. + OrderBy []string `json:"-" url:"order_by,omitempty"` + // Page token returned by previous call + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPipelinesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPipelinesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListPipelinesResponse struct { + // If present, a token to fetch the next page of pipelines.
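Both list responses above page with `next_page_token`. A minimal paging sketch over the event types defined in this file (the `listPipelineEvents` wrapper is hypothetical, standing in for the generated REST call; imports such as `context` are elided):

```go
// Collect all WARN and ERROR events for one pipeline, one page at a time.
// listPipelineEvents is a hypothetical wrapper around the ListPipelineEvents call.
func collectWarnings(ctx context.Context, pipelineId string) ([]PipelineEvent, error) {
	var all []PipelineEvent
	req := ListPipelineEventsRequest{
		PipelineId: pipelineId,
		Filter:     `level in ('WARN', 'ERROR')`,
		MaxResults: 100,
	}
	for {
		resp, err := listPipelineEvents(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Events...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		// Per the field docs, page_token is mutually exclusive with every
		// field except max_results, so follow-up requests carry only those.
		req = ListPipelineEventsRequest{
			PipelineId: pipelineId,
			MaxResults: 100,
			PageToken:  resp.NextPageToken,
		}
	}
}
```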
+ NextPageToken string `json:"next_page_token,omitempty"` + // The list of pipelines matching the request criteria. + Statuses []PipelineStateInfo `json:"statuses,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPipelinesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPipelinesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List pipeline updates +type ListUpdatesRequest struct { + // Max number of entries to return in a single page. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Page token returned by previous call + PageToken string `json:"-" url:"page_token,omitempty"` + // The pipeline to return updates for. + PipelineId string `json:"-" url:"-"` + // If present, returns updates until and including this update_id. + UntilUpdateId string `json:"-" url:"until_update_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListUpdatesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListUpdatesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListUpdatesResponse struct { + // If present, then there are more results, and this is a token to be used in a + // subsequent request to fetch the next page. + NextPageToken string `json:"next_page_token,omitempty"` + // If present, then this token can be used in a subsequent request to fetch + // the previous page. + PrevPageToken string `json:"prev_page_token,omitempty"` + + Updates []UpdateInfo `json:"updates,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListUpdatesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListUpdatesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type LocalFileInfo struct { + // local file destination, e.g. `file:/my/local/file.sh` + Destination string `json:"destination"` +} + +type LogAnalyticsInfo struct { + // + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + // + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ManualTrigger struct { +} + +// Maturity level for EventDetails. +type MaturityLevel string + +const MaturityLevelDeprecated MaturityLevel = `DEPRECATED` + +const MaturityLevelEvolving MaturityLevel = `EVOLVING` + +const MaturityLevelStable MaturityLevel = `STABLE` + +// String representation for [fmt.Print] +func (f *MaturityLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MaturityLevel) Set(v string) error { + switch v { + case `DEPRECATED`, `EVOLVING`, `STABLE`: + *f = MaturityLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEPRECATED", "EVOLVING", "STABLE"`, v) + } +} + +// Type always returns MaturityLevel to satisfy [pflag.Value] interface +func (f *MaturityLevel) Type() string { + return "MaturityLevel" +} + +type MavenLibrary struct { + // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". + Coordinates string `json:"coordinates"` + // List of dependencies to exclude. For example: `["slf4j:slf4j", + // "*:hadoop-client"]`.
+ // + // Maven dependency exclusions: + // https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + Exclusions []string `json:"exclusions,omitempty"` + // Maven repo to install the Maven package from. If omitted, both Maven + // Central Repository and Spark Packages are searched. + Repo string `json:"repo,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MavenLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MavenLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type NotebookLibrary struct { + // The absolute path of the notebook. + Path string `json:"path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NotebookLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NotebookLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Notifications struct { + // A list of alerts that trigger the sending of notifications to the + // configured destinations. The supported alerts are: + // + // * `on-update-success`: A pipeline update completes successfully. * + // `on-update-failure`: Each time a pipeline update fails. * + // `on-update-fatal-failure`: A pipeline update fails with a non-retryable + // (fatal) error. * `on-flow-failure`: A single data flow fails. + Alerts []string `json:"alerts,omitempty"` + // A list of email addresses notified when a configured alert is triggered. + EmailRecipients []string `json:"email_recipients,omitempty"` +} + +type Origin struct { + // The id of a batch. Unique within a flow. + BatchId int `json:"batch_id,omitempty"` + // The cloud provider, e.g., AWS or Azure. + Cloud string `json:"cloud,omitempty"` + // The id of the cluster where an execution happens. Unique within a region. + ClusterId string `json:"cluster_id,omitempty"` + // The name of a dataset. Unique within a pipeline. + DatasetName string `json:"dataset_name,omitempty"` + // The id of the flow. Globally unique. Incremental queries will generally + // reuse the same id while complete queries will have a new id per update. + FlowId string `json:"flow_id,omitempty"` + // The name of the flow. Not unique. + FlowName string `json:"flow_name,omitempty"` + // The optional host name where the event was triggered + Host string `json:"host,omitempty"` + // The id of a maintenance run. Globally unique. + MaintenanceId string `json:"maintenance_id,omitempty"` + // Materialization name. + MaterializationName string `json:"materialization_name,omitempty"` + // The org id of the user. Unique within a cloud. + OrgId int `json:"org_id,omitempty"` + // The id of the pipeline. Globally unique. + PipelineId string `json:"pipeline_id,omitempty"` + // The name of the pipeline. Not unique. + PipelineName string `json:"pipeline_name,omitempty"` + // The cloud region. + Region string `json:"region,omitempty"` + // The id of the request that caused an update. + RequestId string `json:"request_id,omitempty"` + // The id of a (delta) table. Globally unique. + TableId string `json:"table_id,omitempty"` + // The Unity Catalog id of the MV or ST being updated. + UcResourceId string `json:"uc_resource_id,omitempty"` + // The id of an execution. Globally unique. 
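Origin is the handle for correlating events: pipeline_id scopes globally, flow_name scopes within a pipeline. A small sketch over the types defined here, grouping fetched events by flow:

```go
// eventsByFlow buckets pipeline events by the flow that emitted them. Events
// with no origin (or an empty flow name) land under the empty-string key.
func eventsByFlow(events []PipelineEvent) map[string][]PipelineEvent {
	byFlow := make(map[string][]PipelineEvent)
	for _, e := range events {
		var flow string
		if e.Origin != nil {
			flow = e.Origin.FlowName
		}
		byFlow[flow] = append(byFlow[flow], e)
	}
	return byFlow
}
```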
+ UpdateId string `json:"update_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Origin) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Origin) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineAccessControlResponse struct { + // All permissions. + AllPermissions []PipelinePermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineCluster struct { + // Note: This field won't be persisted. Only API users will check this + // field. + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *PipelineClusterAutoscale `json:"autoscale,omitempty"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` + // The configuration for delivering spark logs to a long-term storage + // destination. Only dbfs destinations are supported. Only one destination + // can be specified for one cluster. If the conf is given, the logs will be + // delivered to the destination every `5 mins`. The destination of driver + // logs is `$destination/$clusterId/driver`, while the destination of + // executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]string `json:"custom_tags,omitempty"` + // The optional ID of the instance pool to which the driver of the cluster + // belongs.
The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + // Whether to enable local disk encryption for the cluster. + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `json:"gcp_attributes,omitempty"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId string `json:"instance_pool_id,omitempty"` + // A label for the cluster specification, either `default` to configure the + // default cluster, or `maintenance` to configure the maintenance cluster. + // This field is optional. The default value is `default`. + Label string `json:"label,omitempty"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId string `json:"node_type_id,omitempty"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers int `json:"num_workers,omitempty"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId string `json:"policy_id,omitempty"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. See :method:clusters/create for more + // details. + SparkConf map[string]string `json:"spark_conf,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. 
+ // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineCluster) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineCluster) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineClusterAutoscale struct { + // The maximum number of workers to which the cluster can scale up when + // overloaded. `max_workers` must be strictly greater than `min_workers`. + MaxWorkers int `json:"max_workers"` + // The minimum number of workers the cluster can scale down to when + // underutilized. It is also the initial number of workers the cluster will + // have after creation. + MinWorkers int `json:"min_workers"` + // Databricks Enhanced Autoscaling optimizes cluster utilization by + // automatically allocating cluster resources based on workload volume, with + // minimal impact to the data processing latency of your pipelines. Enhanced + // Autoscaling is available for `updates` clusters only. The legacy + // autoscaling feature is used for `maintenance` clusters. + Mode PipelineClusterAutoscaleMode `json:"mode,omitempty"` +} + +// Databricks Enhanced Autoscaling optimizes cluster utilization by +// automatically allocating cluster resources based on workload volume, with +// minimal impact to the data processing latency of your pipelines. Enhanced +// Autoscaling is available for `updates` clusters only. The legacy autoscaling +// feature is used for `maintenance` clusters. +type PipelineClusterAutoscaleMode string + +const PipelineClusterAutoscaleModeEnhanced PipelineClusterAutoscaleMode = `ENHANCED` + +const PipelineClusterAutoscaleModeLegacy PipelineClusterAutoscaleMode = `LEGACY` + +// String representation for [fmt.Print] +func (f *PipelineClusterAutoscaleMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineClusterAutoscaleMode) Set(v string) error { + switch v { + case `ENHANCED`, `LEGACY`: + *f = PipelineClusterAutoscaleMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ENHANCED", "LEGACY"`, v) + } +} + +// Type always returns PipelineClusterAutoscaleMode to satisfy [pflag.Value] interface +func (f *PipelineClusterAutoscaleMode) Type() string { + return "PipelineClusterAutoscaleMode" +} + +type PipelineDeployment struct { + // The deployment method that manages the pipeline. + Kind DeploymentKind `json:"kind,omitempty"` + // The path to the file containing metadata about the deployment. + MetadataFilePath string `json:"metadata_file_path,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineDeployment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineDeployment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineEvent struct { + // Information about an error captured by the event. + Error *ErrorDetail `json:"error,omitempty"` + // The event type. 
Should always correspond to the details + EventType string `json:"event_type,omitempty"` + // A time-based, globally unique id. + Id string `json:"id,omitempty"` + // The severity level of the event. + Level EventLevel `json:"level,omitempty"` + // Maturity level for event_type. + MaturityLevel MaturityLevel `json:"maturity_level,omitempty"` + // The display message associated with the event. + Message string `json:"message,omitempty"` + // Describes where the event originates from. + Origin *Origin `json:"origin,omitempty"` + // A sequencing object to identify and order events. + Sequence *Sequencing `json:"sequence,omitempty"` + // The time of the event. + Timestamp string `json:"timestamp,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineEvent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineEvent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelineLibrary struct { + // The path to a file that defines a pipeline and is stored in the + // Databricks Repos. + File *FileLibrary `json:"file,omitempty"` + // URI of the jar to be installed. Currently only DBFS is supported. + Jar string `json:"jar,omitempty"` + // Specification of a maven library to be installed. + Maven *MavenLibrary `json:"maven,omitempty"` + // The path to a notebook that defines a pipeline and is stored in the + // Databricks workspace. + Notebook *NotebookLibrary `json:"notebook,omitempty"` + // URI of the whl to be installed. + Whl string `json:"whl,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineLibrary) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineLibrary) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelinePermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelinePermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelinePermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type PipelinePermissionLevel string + +const PipelinePermissionLevelCanManage PipelinePermissionLevel = `CAN_MANAGE` + +const PipelinePermissionLevelCanRun PipelinePermissionLevel = `CAN_RUN` + +const PipelinePermissionLevelCanView PipelinePermissionLevel = `CAN_VIEW` + +const PipelinePermissionLevelIsOwner PipelinePermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *PipelinePermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelinePermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_RUN`, `CAN_VIEW`, `IS_OWNER`: + *f = PipelinePermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_RUN", "CAN_VIEW", "IS_OWNER"`, v) + } +} + +// Type always returns PipelinePermissionLevel to satisfy [pflag.Value] interface +func (f *PipelinePermissionLevel) Type() string { + return "PipelinePermissionLevel" +} + +type PipelinePermissions struct { + AccessControlList []PipelineAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string 
`json:"-"` +} + +func (s *PipelinePermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelinePermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelinePermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelinePermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelinePermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PipelinePermissionsRequest struct { + AccessControlList []PipelineAccessControlRequest `json:"access_control_list,omitempty"` + // The pipeline for which to get or manage permissions. + PipelineId string `json:"-" url:"-"` +} + +type PipelineSpec struct { + // Budget policy of this pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog string `json:"catalog,omitempty"` + // DLT Release Channel that specifies which version to use. + Channel string `json:"channel,omitempty"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `json:"clusters,omitempty"` + // String-String configuration for this pipeline execution. + Configuration map[string]string `json:"configuration,omitempty"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous bool `json:"continuous,omitempty"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `json:"deployment,omitempty"` + // Whether the pipeline is in Development mode. Defaults to false. + Development bool `json:"development,omitempty"` + // Pipeline product edition. + Edition string `json:"edition,omitempty"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `json:"filters,omitempty"` + // The definition of a gateway pipeline to support change data capture. + GatewayDefinition *IngestionGatewayPipelineDefinition `json:"gateway_definition,omitempty"` + // Unique identifier for this pipeline. + Id string `json:"id,omitempty"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `json:"libraries,omitempty"` + // Friendly identifier for this pipeline. + Name string `json:"name,omitempty"` + // List of notification settings for this pipeline. + Notifications []Notifications `json:"notifications,omitempty"` + // Whether Photon is enabled for this pipeline. + Photon bool `json:"photon,omitempty"` + // Restart window of this pipeline. + RestartWindow *RestartWindow `json:"restart_window,omitempty"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema string `json:"schema,omitempty"` + // Whether serverless compute is enabled for this pipeline. 
+ Serverless bool `json:"serverless,omitempty"` + // DBFS root directory for storing checkpoints and tables. + Storage string `json:"storage,omitempty"` + // Target schema (database) to add tables in this pipeline to. If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target string `json:"target,omitempty"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `json:"trigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The pipeline state. +type PipelineState string + +const PipelineStateDeleted PipelineState = `DELETED` + +const PipelineStateDeploying PipelineState = `DEPLOYING` + +const PipelineStateFailed PipelineState = `FAILED` + +const PipelineStateIdle PipelineState = `IDLE` + +const PipelineStateRecovering PipelineState = `RECOVERING` + +const PipelineStateResetting PipelineState = `RESETTING` + +const PipelineStateRunning PipelineState = `RUNNING` + +const PipelineStateStarting PipelineState = `STARTING` + +const PipelineStateStopping PipelineState = `STOPPING` + +// String representation for [fmt.Print] +func (f *PipelineState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineState) Set(v string) error { + switch v { + case `DELETED`, `DEPLOYING`, `FAILED`, `IDLE`, `RECOVERING`, `RESETTING`, `RUNNING`, `STARTING`, `STOPPING`: + *f = PipelineState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETED", "DEPLOYING", "FAILED", "IDLE", "RECOVERING", "RESETTING", "RUNNING", "STARTING", "STOPPING"`, v) + } +} + +// Type always returns PipelineState to satisfy [pflag.Value] interface +func (f *PipelineState) Type() string { + return "PipelineState" +} + +type PipelineStateInfo struct { + // The unique identifier of the cluster running the pipeline. + ClusterId string `json:"cluster_id,omitempty"` + // The username of the pipeline creator. + CreatorUserName string `json:"creator_user_name,omitempty"` + // The health of a pipeline. + Health PipelineStateInfoHealth `json:"health,omitempty"` + // Status of the latest updates for the pipeline. Ordered with the newest + // update first. + LatestUpdates []UpdateStateInfo `json:"latest_updates,omitempty"` + // The user-friendly name of the pipeline. + Name string `json:"name,omitempty"` + // The unique identifier of the pipeline. + PipelineId string `json:"pipeline_id,omitempty"` + // The username that the pipeline runs as. This is a read only value derived + // from the pipeline owner. + RunAsUserName string `json:"run_as_user_name,omitempty"` + // The pipeline state. + State PipelineState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PipelineStateInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PipelineStateInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The health of a pipeline. 
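Like the other enums in this file, PipelineState exposes a pflag-style Set method, which is convenient for validating raw strings from flags or config before use. A brief sketch:

```go
// parsePipelineState validates a raw string against the allowed enum values;
// Set returns a descriptive error for anything outside the list above.
func parsePipelineState(raw string) (PipelineState, error) {
	var s PipelineState
	if err := s.Set(raw); err != nil {
		return "", err
	}
	return s, nil
}
```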
+type PipelineStateInfoHealth string + +const PipelineStateInfoHealthHealthy PipelineStateInfoHealth = `HEALTHY` + +const PipelineStateInfoHealthUnhealthy PipelineStateInfoHealth = `UNHEALTHY` + +// String representation for [fmt.Print] +func (f *PipelineStateInfoHealth) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineStateInfoHealth) Set(v string) error { + switch v { + case `HEALTHY`, `UNHEALTHY`: + *f = PipelineStateInfoHealth(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "HEALTHY", "UNHEALTHY"`, v) + } +} + +// Type always returns PipelineStateInfoHealth to satisfy [pflag.Value] interface +func (f *PipelineStateInfoHealth) Type() string { + return "PipelineStateInfoHealth" +} + +type PipelineTrigger struct { + Cron *CronTrigger `json:"cron,omitempty"` + + Manual *ManualTrigger `json:"manual,omitempty"` +} + +type ReportSpec struct { + // Required. Destination catalog to store table. + DestinationCatalog string `json:"destination_catalog,omitempty"` + // Required. Destination schema to store table. + DestinationSchema string `json:"destination_schema,omitempty"` + // Required. Destination table name. The pipeline fails if a table with that + // name already exists. + DestinationTable string `json:"destination_table,omitempty"` + // Required. Report URL in the source system. + SourceUrl string `json:"source_url,omitempty"` + // Configuration settings to control the ingestion of tables. These settings + // override the table_configuration defined in the + // IngestionPipelineDefinition object. + TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ReportSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ReportSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RestartWindow struct { + // Days of week in which the restart is allowed to happen (within a + // five-hour window starting at start_hour). If not specified all days of + // the week will be used. + DaysOfWeek []DayOfWeek `json:"days_of_week,omitempty"` + // An integer between 0 and 23 denoting the start hour for the restart + // window in the 24-hour day. Continuous pipeline restart is triggered only + // within a five-hour window starting at this hour. + StartHour int `json:"start_hour"` + // Time zone id of restart window. See + // https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html + // for details. If not specified, UTC will be used. + TimeZoneId string `json:"time_zone_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RestartWindow) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RestartWindow) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Write-only setting, available only in Create/Update calls. Specifies the user +// or service principal that the pipeline runs as. If not specified, the +// pipeline runs as the user who created the pipeline. +// +// Only `user_name` or `service_principal_name` can be specified. If both are +// specified, an error is thrown. +type RunAs struct { + // Application ID of an active service principal. Setting this field + // requires the `servicePrincipal/user` role. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // The email of an active workspace user. Users can only set this field to + // their own email. 
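Because the two RunAs identities are mutually exclusive, a request sets exactly one of them. A sketch of the two valid shapes (identity values below are placeholders):

```go
// Run as a service principal (requires the servicePrincipal/user role)...
asServicePrincipal := &RunAs{
	ServicePrincipalName: "00000000-0000-0000-0000-000000000000", // placeholder application ID
}
// ...or as a workspace user; users may only name their own email.
asUser := &RunAs{UserName: "user1@databricks.com"}
_, _ = asServicePrincipal, asUser
```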
+ UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RunAs) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RunAs) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type S3StorageInfo struct { + // (Optional) Set canned access control list for the logs, e.g. + // `bucket-owner-full-control`. If `canned_acl` is set, please make sure the + // cluster iam role has `s3:PutObjectAcl` permission on the destination + // bucket and prefix. The full list of possible canned ACLs can be found at + // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl. + // Please also note that by default only the object owner gets full + // controls. If you are using cross account role for writing data, you may + // want to set `bucket-owner-full-control` to make bucket owner able to read + // the logs. + CannedAcl string `json:"canned_acl,omitempty"` + // S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be + // delivered using cluster iam role, please make sure you set cluster iam + // role and the role has write access to the destination. Please also note + // that you cannot use AWS keys to deliver logs. + Destination string `json:"destination"` + // (Optional) Flag to enable server side encryption, `false` by default. + EnableEncryption bool `json:"enable_encryption,omitempty"` + // (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It + // will be used only when encryption is enabled and the default type is + // `sse-s3`. + EncryptionType string `json:"encryption_type,omitempty"` + // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or + // endpoint needs to be set. If both are set, endpoint will be used. + Endpoint string `json:"endpoint,omitempty"` + // (Optional) Kms key which will be used if encryption is enabled and + // encryption type is set to `sse-kms`. + KmsKey string `json:"kms_key,omitempty"` + // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. + // If both are set, endpoint will be used. + Region string `json:"region,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *S3StorageInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s S3StorageInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SchemaSpec struct { + // Required. Destination catalog to store tables. + DestinationCatalog string `json:"destination_catalog,omitempty"` + // Required. Destination schema to store tables in. Tables with the same + // name as the source tables are created in this destination schema. The + // pipeline fails if a table with the same name already exists. + DestinationSchema string `json:"destination_schema,omitempty"` + // The source catalog name. Might be optional depending on the type of + // source. + SourceCatalog string `json:"source_catalog,omitempty"` + // Required. Schema name in the source database. + SourceSchema string `json:"source_schema,omitempty"` + // Configuration settings to control the ingestion of tables. These settings + // are applied to all tables in this schema and override the + // table_configuration defined in the IngestionPipelineDefinition object.
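The ingestion configuration layers nest: a TableSpecificConfig on the schema applies to every table in that schema and overrides the pipeline-level table_configuration (and is in turn overridden per table by TableSpec). A sketch of a schema-level spec (all names are placeholders):

```go
// Replicate all tables from the source schema into main.ingested, forcing
// SCD type 2 history tracking for every table in this schema.
schema := SchemaSpec{
	SourceCatalog:      "src_catalog",
	SourceSchema:       "sales",
	DestinationCatalog: "main",
	DestinationSchema:  "ingested",
	TableConfiguration: &TableSpecificConfig{
		ScdType: TableSpecificConfigScdTypeScdType2,
	},
}
```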
+ TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SchemaSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SchemaSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Sequencing struct { + // A sequence number, unique and increasing within the control plane. + ControlPlaneSeqNo int `json:"control_plane_seq_no,omitempty"` + // the ID assigned by the data plane. + DataPlaneId *DataPlaneId `json:"data_plane_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Sequencing) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Sequencing) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SerializedException struct { + // Runtime class of the exception + ClassName string `json:"class_name,omitempty"` + // Exception message + Message string `json:"message,omitempty"` + // Stack trace consisting of a list of stack frames + Stack []StackFrame `json:"stack,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SerializedException) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SerializedException) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StackFrame struct { + // Class from which the method call originated + DeclaringClass string `json:"declaring_class,omitempty"` + // File where the method is defined + FileName string `json:"file_name,omitempty"` + // Line from which the method was called + LineNumber int `json:"line_number,omitempty"` + // Name of the method which was called + MethodName string `json:"method_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StackFrame) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StackFrame) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StartUpdate struct { + Cause StartUpdateCause `json:"cause,omitempty"` + // If true, this update will reset all tables before running. + FullRefresh bool `json:"full_refresh,omitempty"` + // A list of tables to update with fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + FullRefreshSelection []string `json:"full_refresh_selection,omitempty"` + + PipelineId string `json:"-" url:"-"` + // A list of tables to update without fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + RefreshSelection []string `json:"refresh_selection,omitempty"` + // If true, this update only validates the correctness of pipeline source + // code but does not materialize or publish any datasets. 
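The selection fields on StartUpdate combine in a precise way: leaving both selections empty requests a full-graph update, while validate_only turns the run into a dry run. Two example requests (the pipeline ID is a placeholder):

```go
// Reset one raw table while incrementally refreshing a cleaned table.
selective := StartUpdate{
	PipelineId:           "placeholder-pipeline-id",
	FullRefreshSelection: []string{"sales_orders_raw"},
	RefreshSelection:     []string{"sales_orders_cleaned"},
}
// Validate the pipeline source code without materializing any datasets.
dryRun := StartUpdate{
	PipelineId:   "placeholder-pipeline-id",
	ValidateOnly: true,
}
_, _ = selective, dryRun
```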
+ ValidateOnly bool `json:"validate_only,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StartUpdate) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StartUpdate) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StartUpdateCause string + +const StartUpdateCauseApiCall StartUpdateCause = `API_CALL` + +const StartUpdateCauseJobTask StartUpdateCause = `JOB_TASK` + +const StartUpdateCauseRetryOnFailure StartUpdateCause = `RETRY_ON_FAILURE` + +const StartUpdateCauseSchemaChange StartUpdateCause = `SCHEMA_CHANGE` + +const StartUpdateCauseServiceUpgrade StartUpdateCause = `SERVICE_UPGRADE` + +const StartUpdateCauseUserAction StartUpdateCause = `USER_ACTION` + +// String representation for [fmt.Print] +func (f *StartUpdateCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *StartUpdateCause) Set(v string) error { + switch v { + case `API_CALL`, `JOB_TASK`, `RETRY_ON_FAILURE`, `SCHEMA_CHANGE`, `SERVICE_UPGRADE`, `USER_ACTION`: + *f = StartUpdateCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API_CALL", "JOB_TASK", "RETRY_ON_FAILURE", "SCHEMA_CHANGE", "SERVICE_UPGRADE", "USER_ACTION"`, v) + } +} + +// Type always returns StartUpdateCause to satisfy [pflag.Value] interface +func (f *StartUpdateCause) Type() string { + return "StartUpdateCause" +} + +type StartUpdateResponse struct { + UpdateId string `json:"update_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StartUpdateResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StartUpdateResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StopPipelineResponse struct { +} + +// Stop a pipeline +type StopRequest struct { + PipelineId string `json:"-" url:"-"` +} + +type TableSpec struct { + // Required. Destination catalog to store table. + DestinationCatalog string `json:"destination_catalog,omitempty"` + // Required. Destination schema to store table. + DestinationSchema string `json:"destination_schema,omitempty"` + // Optional. Destination table name. The pipeline fails if a table with that + // name already exists. If not set, the source table name is used. + DestinationTable string `json:"destination_table,omitempty"` + // Source catalog name. Might be optional depending on the type of source. + SourceCatalog string `json:"source_catalog,omitempty"` + // Schema name in the source database. Might be optional depending on the + // type of source. + SourceSchema string `json:"source_schema,omitempty"` + // Required. Table name in the source database. + SourceTable string `json:"source_table,omitempty"` + // Configuration settings to control the ingestion of tables. These settings + // override the table_configuration defined in the + // IngestionPipelineDefinition object and the SchemaSpec. + TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TableSpecificConfig struct { + // The primary key of the table used to apply changes. + PrimaryKeys []string `json:"primary_keys,omitempty"` + // If true, formula fields defined in the table are included in the + // ingestion. 
This setting is only valid for the Salesforce connector + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + // The SCD type to use to ingest the table. + ScdType TableSpecificConfigScdType `json:"scd_type,omitempty"` + // The column names specifying the logical order of events in the source + // data. Delta Live Tables uses this sequencing to handle change events that + // arrive out of order. + SequenceBy []string `json:"sequence_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TableSpecificConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableSpecificConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The SCD type to use to ingest the table. +type TableSpecificConfigScdType string + +const TableSpecificConfigScdTypeScdType1 TableSpecificConfigScdType = `SCD_TYPE_1` + +const TableSpecificConfigScdTypeScdType2 TableSpecificConfigScdType = `SCD_TYPE_2` + +// String representation for [fmt.Print] +func (f *TableSpecificConfigScdType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableSpecificConfigScdType) Set(v string) error { + switch v { + case `SCD_TYPE_1`, `SCD_TYPE_2`: + *f = TableSpecificConfigScdType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "SCD_TYPE_1", "SCD_TYPE_2"`, v) + } +} + +// Type always returns TableSpecificConfigScdType to satisfy [pflag.Value] interface +func (f *TableSpecificConfigScdType) Type() string { + return "TableSpecificConfigScdType" +} + +type UpdateInfo struct { + // What triggered this update. + Cause UpdateInfoCause `json:"cause,omitempty"` + // The ID of the cluster that the update is running on. + ClusterId string `json:"cluster_id,omitempty"` + // The pipeline configuration with system defaults applied where unspecified + // by the user. Not returned by ListUpdates. + Config *PipelineSpec `json:"config,omitempty"` + // The time when this update was created. + CreationTime int64 `json:"creation_time,omitempty"` + // If true, this update will reset all tables before running. + FullRefresh bool `json:"full_refresh,omitempty"` + // A list of tables to update with fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + FullRefreshSelection []string `json:"full_refresh_selection,omitempty"` + // The ID of the pipeline. + PipelineId string `json:"pipeline_id,omitempty"` + // A list of tables to update without fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + RefreshSelection []string `json:"refresh_selection,omitempty"` + // The update state. + State UpdateInfoState `json:"state,omitempty"` + // The ID of this update. + UpdateId string `json:"update_id,omitempty"` + // If true, this update only validates the correctness of pipeline source + // code but does not materialize or publish any datasets. + ValidateOnly bool `json:"validate_only,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// What triggered this update. 
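When polling UpdateInfo, the state enum below is what separates in-flight updates from finished ones. A small helper sketch:

```go
// isTerminal reports whether an update has finished; every other state means
// the update is still queued, initializing, or running.
func isTerminal(s UpdateInfoState) bool {
	switch s {
	case UpdateInfoStateCompleted, UpdateInfoStateFailed, UpdateInfoStateCanceled:
		return true
	default:
		return false
	}
}
```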
+type UpdateInfoCause string + +const UpdateInfoCauseApiCall UpdateInfoCause = `API_CALL` + +const UpdateInfoCauseJobTask UpdateInfoCause = `JOB_TASK` + +const UpdateInfoCauseRetryOnFailure UpdateInfoCause = `RETRY_ON_FAILURE` + +const UpdateInfoCauseSchemaChange UpdateInfoCause = `SCHEMA_CHANGE` + +const UpdateInfoCauseServiceUpgrade UpdateInfoCause = `SERVICE_UPGRADE` + +const UpdateInfoCauseUserAction UpdateInfoCause = `USER_ACTION` + +// String representation for [fmt.Print] +func (f *UpdateInfoCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateInfoCause) Set(v string) error { + switch v { + case `API_CALL`, `JOB_TASK`, `RETRY_ON_FAILURE`, `SCHEMA_CHANGE`, `SERVICE_UPGRADE`, `USER_ACTION`: + *f = UpdateInfoCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API_CALL", "JOB_TASK", "RETRY_ON_FAILURE", "SCHEMA_CHANGE", "SERVICE_UPGRADE", "USER_ACTION"`, v) + } +} + +// Type always returns UpdateInfoCause to satisfy [pflag.Value] interface +func (f *UpdateInfoCause) Type() string { + return "UpdateInfoCause" +} + +// The update state. +type UpdateInfoState string + +const UpdateInfoStateCanceled UpdateInfoState = `CANCELED` + +const UpdateInfoStateCompleted UpdateInfoState = `COMPLETED` + +const UpdateInfoStateCreated UpdateInfoState = `CREATED` + +const UpdateInfoStateFailed UpdateInfoState = `FAILED` + +const UpdateInfoStateInitializing UpdateInfoState = `INITIALIZING` + +const UpdateInfoStateQueued UpdateInfoState = `QUEUED` + +const UpdateInfoStateResetting UpdateInfoState = `RESETTING` + +const UpdateInfoStateRunning UpdateInfoState = `RUNNING` + +const UpdateInfoStateSettingUpTables UpdateInfoState = `SETTING_UP_TABLES` + +const UpdateInfoStateStopping UpdateInfoState = `STOPPING` + +const UpdateInfoStateWaitingForResources UpdateInfoState = `WAITING_FOR_RESOURCES` + +// String representation for [fmt.Print] +func (f *UpdateInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateInfoState) Set(v string) error { + switch v { + case `CANCELED`, `COMPLETED`, `CREATED`, `FAILED`, `INITIALIZING`, `QUEUED`, `RESETTING`, `RUNNING`, `SETTING_UP_TABLES`, `STOPPING`, `WAITING_FOR_RESOURCES`: + *f = UpdateInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "COMPLETED", "CREATED", "FAILED", "INITIALIZING", "QUEUED", "RESETTING", "RUNNING", "SETTING_UP_TABLES", "STOPPING", "WAITING_FOR_RESOURCES"`, v) + } +} + +// Type always returns UpdateInfoState to satisfy [pflag.Value] interface +func (f *UpdateInfoState) Type() string { + return "UpdateInfoState" +} + +type UpdateStateInfo struct { + CreationTime string `json:"creation_time,omitempty"` + + State UpdateStateInfoState `json:"state,omitempty"` + + UpdateId string `json:"update_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateStateInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateStateInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateStateInfoState string + +const UpdateStateInfoStateCanceled UpdateStateInfoState = `CANCELED` + +const UpdateStateInfoStateCompleted UpdateStateInfoState = `COMPLETED` + +const UpdateStateInfoStateCreated UpdateStateInfoState = `CREATED` + +const UpdateStateInfoStateFailed UpdateStateInfoState = `FAILED` + +const UpdateStateInfoStateInitializing UpdateStateInfoState = `INITIALIZING` + +const 
UpdateStateInfoStateQueued UpdateStateInfoState = `QUEUED` + +const UpdateStateInfoStateResetting UpdateStateInfoState = `RESETTING` + +const UpdateStateInfoStateRunning UpdateStateInfoState = `RUNNING` + +const UpdateStateInfoStateSettingUpTables UpdateStateInfoState = `SETTING_UP_TABLES` + +const UpdateStateInfoStateStopping UpdateStateInfoState = `STOPPING` + +const UpdateStateInfoStateWaitingForResources UpdateStateInfoState = `WAITING_FOR_RESOURCES` + +// String representation for [fmt.Print] +func (f *UpdateStateInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateStateInfoState) Set(v string) error { + switch v { + case `CANCELED`, `COMPLETED`, `CREATED`, `FAILED`, `INITIALIZING`, `QUEUED`, `RESETTING`, `RUNNING`, `SETTING_UP_TABLES`, `STOPPING`, `WAITING_FOR_RESOURCES`: + *f = UpdateStateInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "COMPLETED", "CREATED", "FAILED", "INITIALIZING", "QUEUED", "RESETTING", "RUNNING", "SETTING_UP_TABLES", "STOPPING", "WAITING_FOR_RESOURCES"`, v) + } +} + +// Type always returns UpdateStateInfoState to satisfy [pflag.Value] interface +func (f *UpdateStateInfoState) Type() string { + return "UpdateStateInfoState" +} + +type VolumesStorageInfo struct { + // Unity Catalog volumes file destination, e.g. + // `/Volumes/catalog/schema/volume/dir/file` + Destination string `json:"destination"` +} + +type WorkspaceStorageInfo struct { + // workspace files destination, e.g. + // `/Users/user1@databricks.com/my-init.sh` + Destination string `json:"destination"` +} diff --git a/provisioning/v2preview/api.go b/provisioning/v2preview/api.go new file mode 100755 index 000000000..dc71f5ff8 --- /dev/null +++ b/provisioning/v2preview/api.go @@ -0,0 +1,1385 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Credentials Preview, Encryption Keys Preview, Networks Preview, Private Access Preview, Storage Preview, Vpc Endpoints Preview, Workspaces Preview, etc. +package provisioningpreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type CredentialsPreviewInterface interface { + + // Create credential configuration. + // + // Creates a Databricks credential configuration that represents cloud + // cross-account credentials for a specified account. Databricks uses this to + // set up network infrastructure properly to host Databricks clusters. For your + // AWS IAM role, you need to trust the External ID (the Databricks Account API + // account ID) in the returned credential object, and configure the required + // access policy. + // + // Save the response's `credentials_id` field, which is the ID for your new + // credential configuration object. + // + // For information about how to create a new workspace with this API, see + // [Create a new workspace using the Account API] + // + // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) + + // Delete credential configuration. + // + // Deletes a Databricks credential configuration object for an account, both + // specified by ID. You cannot delete a credential that is associated with any + // workspace. 
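Per the comment above, the one output of Create that matters downstream is credentials_id. A sketch against this interface (the request body is elided; see CreateCredentialRequest in model.go for its fields):

```go
// createAndRemember creates a credential configuration and keeps its ID for
// later workspace creation. api is any CredentialsPreviewInterface value.
func createAndRemember(ctx context.Context, api CredentialsPreviewInterface) (string, error) {
	cred, err := api.Create(ctx, CreateCredentialRequest{ /* see model.go */ })
	if err != nil {
		return "", err
	}
	return cred.CredentialsId, nil // save this for the new-workspace call
}
```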
+ Delete(ctx context.Context, request DeleteCredentialRequest) error + + // Delete credential configuration. + // + // Deletes a Databricks credential configuration object for an account, both + // specified by ID. You cannot delete a credential that is associated with any + // workspace. + DeleteByCredentialsId(ctx context.Context, credentialsId string) error + + // Get credential configuration. + // + // Gets a Databricks credential configuration object for an account, both + // specified by ID. + Get(ctx context.Context, request GetCredentialRequest) (*Credential, error) + + // Get credential configuration. + // + // Gets a Databricks credential configuration object for an account, both + // specified by ID. + GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error) + + // Get all credential configurations. + // + // Gets all Databricks credential configurations associated with an account + // specified by ID. + List(ctx context.Context) ([]Credential, error) + + // CredentialCredentialsNameToCredentialsIdMap calls [CredentialsPreviewAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. + // + // Returns an error if there's more than one [Credential] with the same .CredentialsName. + // + // Note: All [Credential] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error) + + // GetByCredentialsName calls [CredentialsPreviewAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. + // + // Returns an error if there's more than one [Credential] with the same .CredentialsName. + // + // Note: All [Credential] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByCredentialsName(ctx context.Context, name string) (*Credential, error) +} + +func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewAPI { + return &CredentialsPreviewAPI{ + credentialsPreviewImpl: credentialsPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage credential configurations for this workspace. Databricks +// needs access to a cross-account service IAM role in your AWS account so that +// Databricks can deploy clusters in the appropriate VPC for the new workspace. +// A credential configuration encapsulates this role information, and its ID is +// used when creating a new workspace. +type CredentialsPreviewAPI struct { + credentialsPreviewImpl +} + +// Delete credential configuration. +// +// Deletes a Databricks credential configuration object for an account, both +// specified by ID. You cannot delete a credential that is associated with any +// workspace. +func (a *CredentialsPreviewAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error { + return a.credentialsPreviewImpl.Delete(ctx, DeleteCredentialRequest{ + CredentialsId: credentialsId, + }) +} + +// Get credential configuration. +// +// Gets a Databricks credential configuration object for an account, both +// specified by ID. 
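As both doc comments above note, the name-based helpers load every credential into memory and reject duplicate names, so they suit small, uniquely named sets. Typical usage (the credential name is a placeholder; fmt is assumed imported):

```go
// printCredentialId resolves a credential configuration by its human-readable
// name; the lookup fails if the name is missing or duplicated.
func printCredentialId(ctx context.Context, api CredentialsPreviewInterface) error {
	cred, err := api.GetByCredentialsName(ctx, "aws-cross-account-role")
	if err != nil {
		return err
	}
	fmt.Println(cred.CredentialsId)
	return nil
}
```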
+func (a *CredentialsPreviewAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error) { + return a.credentialsPreviewImpl.Get(ctx, GetCredentialRequest{ + CredentialsId: credentialsId, + }) +} + +// CredentialCredentialsNameToCredentialsIdMap calls [CredentialsPreviewAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. +// +// Returns an error if there's more than one [Credential] with the same .CredentialsName. +// +// Note: All [Credential] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *CredentialsPreviewAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.CredentialsName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .CredentialsName: %s", key) + } + mapping[key] = v.CredentialsId + } + return mapping, nil +} + +// GetByCredentialsName calls [CredentialsPreviewAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. +// +// Returns an error if there's more than one [Credential] with the same .CredentialsName. +// +// Note: All [Credential] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *CredentialsPreviewAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]Credential{} + for _, v := range result { + key := v.CredentialsName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Credential named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Credential named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type EncryptionKeysPreviewInterface interface { + + // Create encryption key configuration. + // + // Creates a customer-managed key configuration object for an account, specified + // by ID. This operation uploads a reference to a customer-managed key to + // Databricks. If the key is assigned as a workspace's customer-managed key for + // managed services, Databricks uses the key to encrypt the workspace's notebooks + // and secrets in the control plane, in addition to Databricks SQL queries and + // query history. If it is specified as a workspace's customer-managed key for + // workspace storage, the key encrypts the workspace's root S3 bucket (which + // contains the workspace's root DBFS and system data) and, optionally, cluster + // EBS volume data. + // + // **Important**: Customer-managed keys are supported only for some deployment + // types, subscription types, and AWS regions that currently support creation of + // Databricks workspaces. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account.
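+ // + // A hedged sketch of a create call, assuming api is an + // EncryptionKeysPreviewInterface and that the request fields mirror the + // non-preview provisioning API (the key ARN is a placeholder): + // + //	key, err := api.Create(ctx, CreateCustomerManagedKeyRequest{ + //		AwsKeyInfo: &CreateAwsKeyInfo{ + //			KeyArn: "arn:aws:kms:us-west-2:111122223333:key/example", + //		}, + //		UseCases: []KeyUseCase{KeyUseCaseManagedServices}, + //	})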
+ Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) + + // Delete encryption key configuration. + // + // Deletes a customer-managed key configuration object for an account. You + // cannot delete a configuration that is associated with a running workspace. + Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error + + // Delete encryption key configuration. + // + // Deletes a customer-managed key configuration object for an account. You + // cannot delete a configuration that is associated with a running workspace. + DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error + + // Get encryption key configuration. + // + // Gets a customer-managed key configuration object for an account, specified by + // ID. This operation uploads a reference to a customer-managed key to + // Databricks. If assigned as a workspace's customer-managed key for managed + // services, Databricks uses the key to encrypt the workspace's notebooks and + // secrets in the control plane, in addition to Databricks SQL queries and query + // history. If it is specified as a workspace's customer-managed key for + // storage, the key encrypts the workspace's root S3 bucket (which contains the + // workspace's root DBFS and system data) and, optionally, cluster EBS volume + // data. + // + // **Important**: Customer-managed keys are supported only for some deployment + // types, subscription types, and AWS regions. + // + // This operation is available only if your account is on the E2 version of the + // platform. + Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) + + // Get encryption key configuration. + // + // Gets a customer-managed key configuration object for an account, specified by + // ID. This operation uploads a reference to a customer-managed key to + // Databricks. If assigned as a workspace's customer-managed key for managed + // services, Databricks uses the key to encrypt the workspace's notebooks and + // secrets in the control plane, in addition to Databricks SQL queries and query + // history. If it is specified as a workspace's customer-managed key for + // storage, the key encrypts the workspace's root S3 bucket (which contains the + // workspace's root DBFS and system data) and, optionally, cluster EBS volume + // data. + // + // **Important**: Customer-managed keys are supported only for some deployment + // types, subscription types, and AWS regions. + // + // This operation is available only if your account is on the E2 version of the + // platform. + GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error) + + // Get all encryption key configurations. + // + // Gets all customer-managed key configuration objects for an account. If the + // key is specified as a workspace's managed services customer-managed key, + // Databricks uses the key to encrypt the workspace's notebooks and secrets in + // the control plane, in addition to Databricks SQL queries and query history. + // If the key is specified as a workspace's storage customer-managed key, the + // key is used to encrypt the workspace's root S3 bucket and optionally can + // encrypt cluster EBS volume data in the data plane. + // + // **Important**: Customer-managed keys are supported only for some deployment + // types, subscription types, and AWS regions. + // + // This operation is available only if your account is on the E2 version of the + // platform.
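+ // + // A minimal listing sketch, assuming api is an EncryptionKeysPreviewInterface + // (field access assumed to match the generated model): + // + //	keys, err := api.List(ctx) + //	if err != nil { + //		return err + //	} + //	for _, k := range keys { + //		fmt.Println(k.CustomerManagedKeyId) + //	}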
+ List(ctx context.Context) ([]CustomerManagedKey, error) +} + +func NewEncryptionKeysPreview(client *client.DatabricksClient) *EncryptionKeysPreviewAPI { + return &EncryptionKeysPreviewAPI{ + encryptionKeysPreviewImpl: encryptionKeysPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage encryption key configurations for this workspace +// (optional). A key configuration encapsulates the AWS KMS key information and +// some information about how the key configuration can be used. There are two +// possible uses for key configurations: +// +// * Managed services: A key configuration can be used to encrypt a workspace's +// notebook and secret data in the control plane, as well as Databricks SQL +// queries and query history. * Storage: A key configuration can be used to +// encrypt a workspace's DBFS and EBS data in the data plane. +// +// In both of these cases, the key configuration's ID is used when creating a +// new workspace. This Preview feature is available if your account is on the E2 +// version of the platform. Updating a running workspace with workspace storage +// encryption requires that the workspace is on the E2 version of the platform. +// If you have an older workspace, it might not be on the E2 version of the +// platform. If you are not sure, contact your Databricks representative. +type EncryptionKeysPreviewAPI struct { + encryptionKeysPreviewImpl +} + +// Delete encryption key configuration. +// +// Deletes a customer-managed key configuration object for an account. You +// cannot delete a configuration that is associated with a running workspace. +func (a *EncryptionKeysPreviewAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error { + return a.encryptionKeysPreviewImpl.Delete(ctx, DeleteEncryptionKeyRequest{ + CustomerManagedKeyId: customerManagedKeyId, + }) +} + +// Get encryption key configuration. +// +// Gets a customer-managed key configuration object for an account, specified by +// ID. This operation uploads a reference to a customer-managed key to +// Databricks. If assigned as a workspace's customer-managed key for managed +// services, Databricks uses the key to encrypt the workspace's notebooks and +// secrets in the control plane, in addition to Databricks SQL queries and query +// history. If it is specified as a workspace's customer-managed key for +// storage, the key encrypts the workspace's root S3 bucket (which contains the +// workspace's root DBFS and system data) and, optionally, cluster EBS volume +// data. +// +// **Important**: Customer-managed keys are supported only for some deployment +// types, subscription types, and AWS regions. +// +// This operation is available only if your account is on the E2 version of the +// platform. +func (a *EncryptionKeysPreviewAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error) { + return a.encryptionKeysPreviewImpl.Get(ctx, GetEncryptionKeyRequest{ + CustomerManagedKeyId: customerManagedKeyId, + }) +} + +type NetworksPreviewInterface interface { + + // Create network configuration. + // + // Creates a Databricks network configuration that represents a VPC and its + // resources. The VPC will be used for new Databricks clusters. This requires a + // pre-existing VPC and subnets. + Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) + + // Delete a network configuration. + // + // Deletes a Databricks network configuration, which represents a cloud VPC and + // its resources.
You cannot delete a network that is associated with a + // workspace. + // + // This operation is available only if your account is on the E2 version of the + // platform. + Delete(ctx context.Context, request DeleteNetworkRequest) error + + // Delete a network configuration. + // + // Deletes a Databricks network configuration, which represents a cloud VPC and + // its resources. You cannot delete a network that is associated with a + // workspace. + // + // This operation is available only if your account is on the E2 version of the + // platform. + DeleteByNetworkId(ctx context.Context, networkId string) error + + // Get a network configuration. + // + // Gets a Databricks network configuration, which represents a cloud VPC and its + // resources. + Get(ctx context.Context, request GetNetworkRequest) (*Network, error) + + // Get a network configuration. + // + // Gets a Databricks network configuration, which represents a cloud VPC and its + // resources. + GetByNetworkId(ctx context.Context, networkId string) (*Network, error) + + // Get all network configurations. + // + // Gets a list of all Databricks network configurations for an account, + // specified by ID. + // + // This operation is available only if your account is on the E2 version of the + // platform. + List(ctx context.Context) ([]Network, error) + + // NetworkNetworkNameToNetworkIdMap calls [NetworksPreviewAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. + // + // Returns an error if there's more than one [Network] with the same .NetworkName. + // + // Note: All [Network] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error) + + // GetByNetworkName calls [NetworksPreviewAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. + // + // Returns an error if there's more than one [Network] with the same .NetworkName. + // + // Note: All [Network] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByNetworkName(ctx context.Context, name string) (*Network, error) +} + +func NewNetworksPreview(client *client.DatabricksClient) *NetworksPreviewAPI { + return &NetworksPreviewAPI{ + networksPreviewImpl: networksPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage network configurations for customer-managed VPCs +// (optional). A network configuration's ID is used when creating a new +// workspace if you use customer-managed VPCs. +type NetworksPreviewAPI struct { + networksPreviewImpl +} + +// Delete a network configuration. +// +// Deletes a Databricks network configuration, which represents a cloud VPC and +// its resources. You cannot delete a network that is associated with a +// workspace. +// +// This operation is available only if your account is on the E2 version of the +// platform. +func (a *NetworksPreviewAPI) DeleteByNetworkId(ctx context.Context, networkId string) error { + return a.networksPreviewImpl.Delete(ctx, DeleteNetworkRequest{ + NetworkId: networkId, + }) +} + +// Get a network configuration. +// +// Gets a Databricks network configuration, which represents a cloud VPC and its +// resources.
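+// +// A minimal usage sketch (the network ID is a hypothetical placeholder): +// +//	netw, err := a.GetByNetworkId(ctx, "nw-1234") +//	if err != nil { +//		return err +//	} +//	fmt.Println(netw.NetworkName)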
+func (a *NetworksPreviewAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error) { + return a.networksPreviewImpl.Get(ctx, GetNetworkRequest{ + NetworkId: networkId, + }) +} + +// NetworkNetworkNameToNetworkIdMap calls [NetworksPreviewAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. +// +// Returns an error if there's more than one [Network] with the same .NetworkName. +// +// Note: All [Network] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *NetworksPreviewAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.NetworkName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .NetworkName: %s", key) + } + mapping[key] = v.NetworkId + } + return mapping, nil +} + +// GetByNetworkName calls [NetworksPreviewAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. +// +// Returns an error if there's more than one [Network] with the same .NetworkName. +// +// Note: All [Network] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *NetworksPreviewAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]Network{} + for _, v := range result { + key := v.NetworkName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Network named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Network named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type PrivateAccessPreviewInterface interface { + + // Create private access settings. + // + // Creates a private access settings object, which specifies how your workspace + // is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must + // have a private access settings object referenced by ID in the workspace's + // `private_access_settings_id` property. + // + // You can share one private access settings with multiple workspaces in a + // single account. However, private access settings are specific to AWS regions, + // so only workspaces in the same AWS region can use a given private access + // settings object. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) + + // Delete a private access settings object. + // + // Deletes a private access settings object, which determines how your workspace + // is accessed over [AWS PrivateLink]. 
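+ // + // A minimal deletion sketch, assuming api is a PrivateAccessPreviewInterface + // (the settings ID is a hypothetical placeholder): + // + //	err := api.Delete(ctx, DeletePrivateAccesRequest{ + //		PrivateAccessSettingsId: "pas-1234", + //	})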
+ // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Delete(ctx context.Context, request DeletePrivateAccesRequest) error + + // Delete a private access settings object. + // + // Deletes a private access settings object, which determines how your workspace + // is accessed over [AWS PrivateLink]. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error + + // Get a private access settings object. + // + // Gets a private access settings object, which specifies how your workspace is + // accessed over [AWS PrivateLink]. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) + + // Get a private access settings object. + // + // Gets a private access settings object, which specifies how your workspace is + // accessed over [AWS PrivateLink]. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error) + + // Get all private access settings objects. + // + // Gets a list of all private access settings objects for an account, specified + // by ID. + List(ctx context.Context) ([]PrivateAccessSettings, error) + + // PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessPreviewAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. + // + // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. + // + // Note: All [PrivateAccessSettings] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error) + + // GetByPrivateAccessSettingsName calls [PrivateAccessPreviewAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. + // + // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. + // + // Note: All [PrivateAccessSettings] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator.
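+ // + // A minimal lookup sketch, assuming api is a PrivateAccessPreviewInterface + // (the name is a hypothetical placeholder; note the full-list scan described + // above): + // + //	pas, err := api.GetByPrivateAccessSettingsName(ctx, "my-pas") + //	if err != nil { + //		return err + //	} + //	fmt.Println(pas.PrivateAccessSettingsId)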
+ GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error) + + // Replace private access settings. + + // Updates an existing private access settings object, which specifies how your + // workspace is accessed over [AWS PrivateLink]. To use AWS PrivateLink, a + // workspace must have a private access settings object referenced by ID in the + // workspace's `private_access_settings_id` property. + // + // This operation completely overwrites your existing private access settings + // object attached to your workspaces. All workspaces attached to the private + // access settings are affected by any change. If `public_access_enabled`, + // `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of + // these changes might take several minutes to propagate to the workspace API. + // + // You can share one private access settings object with multiple workspaces in + // a single account. However, private access settings are specific to AWS + // regions, so only workspaces in the same AWS region can use a given private + // access settings object. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error +} + +func NewPrivateAccessPreview(client *client.DatabricksClient) *PrivateAccessPreviewAPI { + return &PrivateAccessPreviewAPI{ + privateAccessPreviewImpl: privateAccessPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage private access settings for this account. +type PrivateAccessPreviewAPI struct { + privateAccessPreviewImpl +} + +// Delete a private access settings object. +// +// Deletes a private access settings object, which determines how your workspace +// is accessed over [AWS PrivateLink]. +// +// Before configuring PrivateLink, read the [Databricks article about +// PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink +// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html +func (a *PrivateAccessPreviewAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error { + return a.privateAccessPreviewImpl.Delete(ctx, DeletePrivateAccesRequest{ + PrivateAccessSettingsId: privateAccessSettingsId, + }) +} + +// Get a private access settings object. +// +// Gets a private access settings object, which specifies how your workspace is +// accessed over [AWS PrivateLink].
+// +// Before configuring PrivateLink, read the [Databricks article about +// PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink +// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html +func (a *PrivateAccessPreviewAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error) { + return a.privateAccessPreviewImpl.Get(ctx, GetPrivateAccesRequest{ + PrivateAccessSettingsId: privateAccessSettingsId, + }) +} + +// PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessPreviewAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. +// +// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. +// +// Note: All [PrivateAccessSettings] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *PrivateAccessPreviewAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.PrivateAccessSettingsName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .PrivateAccessSettingsName: %s", key) + } + mapping[key] = v.PrivateAccessSettingsId + } + return mapping, nil +} + +// GetByPrivateAccessSettingsName calls [PrivateAccessPreviewAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. +// +// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. +// +// Note: All [PrivateAccessSettings] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *PrivateAccessPreviewAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]PrivateAccessSettings{} + for _, v := range result { + key := v.PrivateAccessSettingsName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("PrivateAccessSettings named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of PrivateAccessSettings named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type StoragePreviewInterface interface { + + // Create new storage configuration. + + // Creates a new storage configuration for an account, specified by ID. Uploads a + // storage configuration object that represents the root AWS S3 bucket in your + // account. Databricks stores related workspace assets including DBFS, cluster + // logs, and job results. For the AWS S3 bucket, you need to configure the + // required bucket policy.
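+ // + // A hedged sketch of a create call, assuming api is a StoragePreviewInterface + // and that the request fields mirror the non-preview provisioning API (the + // names are hypothetical placeholders): + // + //	storage, err := api.Create(ctx, CreateStorageConfigurationRequest{ + //		StorageConfigurationName: "main-storage", + //		RootBucketInfo: RootBucketInfo{ + //			BucketName: "my-root-bucket", + //		}, + //	})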
+ // + // For information about how to create a new workspace with this API, see + // [Create a new workspace using the Account API] + // + // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) + + // Delete storage configuration. + // + // Deletes a Databricks storage configuration. You cannot delete a storage + // configuration that is associated with any workspace. + Delete(ctx context.Context, request DeleteStorageRequest) error + + // Delete storage configuration. + // + // Deletes a Databricks storage configuration. You cannot delete a storage + // configuration that is associated with any workspace. + DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error + + // Get storage configuration. + // + // Gets a Databricks storage configuration for an account, both specified by ID. + Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) + + // Get storage configuration. + // + // Gets a Databricks storage configuration for an account, both specified by ID. + GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error) + + // Get all storage configurations. + // + // Gets a list of all Databricks storage configurations for your account, + // specified by ID. + List(ctx context.Context) ([]StorageConfiguration, error) + + // StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StoragePreviewAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. + // + // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. + // + // Note: All [StorageConfiguration] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error) + + // GetByStorageConfigurationName calls [StoragePreviewAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. + // + // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. + // + // Note: All [StorageConfiguration] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error) +} + +func NewStoragePreview(client *client.DatabricksClient) *StoragePreviewAPI { + return &StoragePreviewAPI{ + storagePreviewImpl: storagePreviewImpl{ + client: client, + }, + } +} + +// These APIs manage storage configurations for this workspace. A root storage +// S3 bucket in your account is required to store objects like cluster logs, +// notebook revisions, and job results. You can also use the root storage S3 +// bucket for storage of non-production DBFS data. A storage configuration +// encapsulates this bucket information, and its ID is used when creating a new +// workspace. +type StoragePreviewAPI struct { + storagePreviewImpl +} + +// Delete storage configuration. +// +// Deletes a Databricks storage configuration. 
You cannot delete a storage +// configuration that is associated with any workspace. +func (a *StoragePreviewAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error { + return a.storagePreviewImpl.Delete(ctx, DeleteStorageRequest{ + StorageConfigurationId: storageConfigurationId, + }) +} + +// Get storage configuration. +// +// Gets a Databricks storage configuration for an account, both specified by ID. +func (a *StoragePreviewAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error) { + return a.storagePreviewImpl.Get(ctx, GetStorageRequest{ + StorageConfigurationId: storageConfigurationId, + }) +} + +// StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StoragePreviewAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. +// +// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. +// +// Note: All [StorageConfiguration] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *StoragePreviewAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.StorageConfigurationName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .StorageConfigurationName: %s", key) + } + mapping[key] = v.StorageConfigurationId + } + return mapping, nil +} + +// GetByStorageConfigurationName calls [StoragePreviewAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. +// +// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. +// +// Note: All [StorageConfiguration] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *StoragePreviewAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]StorageConfiguration{} + for _, v := range result { + key := v.StorageConfigurationName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("StorageConfiguration named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of StorageConfiguration named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type VpcEndpointsPreviewInterface interface { + + // Create VPC endpoint configuration. + // + // Creates a VPC endpoint configuration, which represents a [VPC endpoint] + // object in AWS used to communicate privately with Databricks over [AWS + // PrivateLink]. + // + // After you create the VPC endpoint configuration, the Databricks [endpoint + // service] automatically accepts the VPC endpoint. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. 
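+ // + // A hedged sketch of a create call, assuming api is a + // VpcEndpointsPreviewInterface and that the request fields mirror the + // non-preview provisioning API (the endpoint ID and region are placeholders): + // + //	ep, err := api.Create(ctx, CreateVpcEndpointRequest{ + //		VpcEndpointName: "my-endpoint", + //		AwsVpcEndpointId: "vpce-0123456789abcdef0", + //		Region: "us-west-2", + //	})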
+ // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html + // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html + Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) + + // Delete VPC endpoint configuration. + // + // Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] + // that can communicate privately with Databricks over [AWS PrivateLink]. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + Delete(ctx context.Context, request DeleteVpcEndpointRequest) error + + // Delete VPC endpoint configuration. + // + // Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] + // that can communicate privately with Databricks over [AWS PrivateLink]. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error + + // Get a VPC endpoint configuration. + // + // Gets a VPC endpoint configuration, which represents a [VPC endpoint] object + // in AWS used to communicate privately with Databricks over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) + + // Get a VPC endpoint configuration. + // + // Gets a VPC endpoint configuration, which represents a [VPC endpoint] object + // in AWS used to communicate privately with Databricks over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink + // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html + GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error) + + // Get all VPC endpoint configurations. + // + // Gets a list of all VPC endpoints for an account, specified by ID. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + List(ctx context.Context) ([]VpcEndpoint, error) + + // VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsPreviewAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. + // + // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. + // + // Note: All [VpcEndpoint] instances are loaded into memory before creating a map. 
+ // + // This method is generated by Databricks SDK Code Generator. + VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error) + + // GetByVpcEndpointName calls [VpcEndpointsPreviewAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. + // + // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. + // + // Note: All [VpcEndpoint] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error) +} + +func NewVpcEndpointsPreview(client *client.DatabricksClient) *VpcEndpointsPreviewAPI { + return &VpcEndpointsPreviewAPI{ + vpcEndpointsPreviewImpl: vpcEndpointsPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage VPC endpoint configurations for this account. +type VpcEndpointsPreviewAPI struct { + vpcEndpointsPreviewImpl +} + +// Delete VPC endpoint configuration. +// +// Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] +// that can communicate privately with Databricks over [AWS PrivateLink]. +// +// Before configuring PrivateLink, read the [Databricks article about +// PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink +// [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html +// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html +func (a *VpcEndpointsPreviewAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error { + return a.vpcEndpointsPreviewImpl.Delete(ctx, DeleteVpcEndpointRequest{ + VpcEndpointId: vpcEndpointId, + }) +} + +// Get a VPC endpoint configuration. +// +// Gets a VPC endpoint configuration, which represents a [VPC endpoint] object +// in AWS used to communicate privately with Databricks over [AWS PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink +// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html +func (a *VpcEndpointsPreviewAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error) { + return a.vpcEndpointsPreviewImpl.Get(ctx, GetVpcEndpointRequest{ + VpcEndpointId: vpcEndpointId, + }) +} + +// VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsPreviewAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. +// +// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. +// +// Note: All [VpcEndpoint] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *VpcEndpointsPreviewAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.VpcEndpointName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .VpcEndpointName: %s", key) + } + mapping[key] = v.VpcEndpointId + } + return mapping, nil +} + +// GetByVpcEndpointName calls [VpcEndpointsPreviewAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. 
+// +// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. +// +// Note: All [VpcEndpoint] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *VpcEndpointsPreviewAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]VpcEndpoint{} + for _, v := range result { + key := v.VpcEndpointName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("VpcEndpoint named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of VpcEndpoint named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type WorkspacesPreviewInterface interface { + + // Create a new workspace. + + // Creates a new workspace. + // + // **Important**: This operation is asynchronous. A response with HTTP status + // code 200 means the request has been accepted and is in progress, but does not + // mean that the workspace deployed successfully and is running. The initial + // workspace status is typically `PROVISIONING`. Use the workspace ID + // (`workspace_id`) field in the response to identify the new workspace and make + // repeated `GET` requests with the workspace ID and check its status. The + // workspace becomes available when the status changes to `RUNNING`. + Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error) + + // Delete a workspace. + + // Terminates and deletes a Databricks workspace. From an API perspective, + // deletion is immediate. However, it might take a few minutes for all + // workspace resources to be deleted, depending on the size and number of + // workspace resources. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account. + Delete(ctx context.Context, request DeleteWorkspaceRequest) error + + // Delete a workspace. + + // Terminates and deletes a Databricks workspace. From an API perspective, + // deletion is immediate. However, it might take a few minutes for all + // workspace resources to be deleted, depending on the size and number of + // workspace resources. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account. + DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error + + // Get a workspace. + + // Gets information including status for a Databricks workspace, specified by + // ID. In the response, the `workspace_status` field indicates the current + // status. After initial workspace creation (which is asynchronous), make + // repeated `GET` requests with the workspace ID and check its status. The + // workspace becomes available when the status changes to `RUNNING`. + // + // For information about how to create a new workspace with this API **including + // error handling**, see [Create a new workspace using the Account API]. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account.
+ // + // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error) + + // Get a workspace. + + // Gets information including status for a Databricks workspace, specified by + // ID. In the response, the `workspace_status` field indicates the current + // status. After initial workspace creation (which is asynchronous), make + // repeated `GET` requests with the workspace ID and check its status. The + // workspace becomes available when the status changes to `RUNNING`. + // + // For information about how to create a new workspace with this API **including + // error handling**, see [Create a new workspace using the Account API]. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account. + // + // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error) + + // Get all workspaces. + + // Gets a list of all workspaces associated with an account, specified by ID. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account. + List(ctx context.Context) ([]Workspace, error) + + // WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesPreviewAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. + // + // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. + // + // Note: All [Workspace] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error) + + // GetByWorkspaceName calls [WorkspacesPreviewAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. + // + // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. + // + // Note: All [Workspace] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error) + + // Update workspace configuration. + + // Updates a workspace configuration for either a running workspace or a failed + // workspace. The elements that can be updated vary between these two use + // cases. + // + // ### Update a failed workspace You can update a Databricks workspace + // configuration for failed workspace deployment for some fields, but not all + // fields. For a failed workspace, this request supports updates to the + // following fields only: - Credential configuration ID - Storage configuration + // ID - Network configuration ID. Used only to add or change a network + // configuration for a customer-managed VPC. For a failed workspace only, you + // can convert a workspace with Databricks-managed VPC to use a customer-managed + // VPC by adding this ID. You cannot downgrade a workspace with a + // customer-managed VPC to be a Databricks-managed VPC.
You can update the + // network configuration for a failed or running workspace to add PrivateLink + // support, though you must also add a private access settings object. - Key + // configuration ID for managed services (control plane storage, such as + // notebook source and Databricks SQL queries). Used only if you use + // customer-managed keys for managed services. - Key configuration ID for + // workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if + // you use customer-managed keys for workspace storage. **Important**: If the + // workspace was ever in the running state, even if briefly before becoming a + // failed workspace, you cannot add a new key configuration ID for workspace + // storage. - Private access settings ID to add PrivateLink support. You can add + // or update the private access settings ID to upgrade a workspace to add + // support for front-end, back-end, or both types of connectivity. You cannot + // remove (downgrade) any existing front-end or back-end PrivateLink support on + // a workspace. - Custom tags. If you provide empty custom tags, the + // update is not applied. - Network connectivity configuration ID to add + // serverless stable IP support. You can add or update the network connectivity + // configuration ID to ensure the workspace uses the same set of stable IP CIDR + // blocks to access your resources. You cannot remove a network connectivity + // configuration from the workspace once attached; you can only switch to + // another one. + // + // After calling the `PATCH` operation to update the workspace configuration, + // make repeated `GET` requests with the workspace ID and check the workspace + // status. The update is successful if the status changes to `RUNNING`. + // + // For information about how to create a new workspace with this API **including + // error handling**, see [Create a new workspace using the Account API]. + // + // ### Update a running workspace You can update a Databricks workspace + // configuration for running workspaces for some fields, but not all fields. For + // a running workspace, this request supports updating the following fields + // only: - Credential configuration ID - Network configuration ID. Used only if + // you already use a customer-managed VPC. You cannot convert a running + // workspace from a Databricks-managed VPC to a customer-managed VPC. You can + // use a network configuration update in this API for a failed or running + // workspace to add support for PrivateLink, although you also need to add a + // private access settings object. - Key configuration ID for managed services + // (control plane storage, such as notebook source and Databricks SQL queries). + // Databricks does not directly encrypt the data with the customer-managed key + // (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) that + // is unique to your workspace to encrypt the Data Encryption Key (DEK). + // Databricks uses the DEK to encrypt your workspace's managed services + // persisted data. If the workspace does not already have a CMK for managed + // services, adding this ID enables managed services encryption for new or + // updated data. Existing managed services data that existed before adding the + // key is not encrypted with the DEK until it is modified. If the workspace + // already has customer-managed keys for managed services, this request rotates + // (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new + // CMK.
- Key configuration ID for workspace storage (root S3 bucket and, + // optionally, EBS volumes). You can set this only if the workspace does not + // already have a customer-managed key configuration for workspace storage. - + // Private access settings ID to add PrivateLink support. You can add or update + // the private access settings ID to upgrade a workspace to add support for + // front-end, back-end, or both types of connectivity. You cannot remove + // (downgrade) any existing front-end or back-end PrivateLink support on a + // workspace. - Custom tags. If you provide empty custom tags, the update + // is not applied. - Network connectivity configuration ID to add + // serverless stable IP support. You can add or update the network connectivity + // configuration ID to ensure the workspace uses the same set of stable IP CIDR + // blocks to access your resources. You cannot remove a network connectivity + // configuration from the workspace once attached; you can only switch to + // another one. + // + // **Important**: To update a running workspace, your workspace must have no + // running compute resources that run in your workspace's VPC in the Classic + // data plane. For example, stop all all-purpose clusters, job clusters, pools + // with running clusters, and Classic SQL warehouses. If you do not terminate + // all cluster instances in the workspace before calling this API, the request + // will fail. + // + // ### Wait until changes take effect. After calling the `PATCH` operation to + // update the workspace configuration, make repeated `GET` requests with the + // workspace ID and check the workspace status and the status of the fields. * + // For workspaces with a Databricks-managed VPC, the workspace status becomes + // `PROVISIONING` temporarily (typically under 20 minutes). If the workspace + // update is successful, the workspace status changes to `RUNNING`. Note that + // you can also check the workspace status in the [Account Console]. However, + // you cannot use or create clusters for another 20 minutes after that status + // change. This results in a total of up to 40 minutes in which you cannot + // create clusters. If you create or use clusters before this time interval + // elapses, clusters might not launch successfully, might fail, or could cause + // other unexpected behavior. * For workspaces with a customer-managed VPC, the + // workspace status stays `RUNNING` and the VPC change happens + // immediately. A change to the storage customer-managed key configuration ID + // might take a few minutes to update, so continue to check the workspace until + // you observe that it has been updated. If the update fails, the workspace + // might revert silently to its original configuration. After the workspace has + // been updated, you cannot use or create clusters for another 20 minutes. If + // you create or use clusters before this time interval elapses, clusters might + // not launch successfully, might fail, or could cause other unexpected behavior. + // + // If you update the _storage_ customer-managed key configurations, it takes 20 + // minutes for the changes to fully take effect. During the 20-minute wait, it + // is important that you stop all REST API calls to the DBFS API. If you are + // modifying _only the managed services key configuration_, you can omit the + // 20-minute wait. + // + // **Important**: Customer-managed keys and customer-managed VPCs are supported + // by only some deployment types and subscription types.
If you have questions + // about availability, contact your Databricks representative. + // + // This operation is available only if your account is on the E2 version of the + // platform or on a select custom plan that allows multiple workspaces per + // account. + // + // [Account Console]: https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html + // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html + Update(ctx context.Context, request UpdateWorkspaceRequest) error +} + +func NewWorkspacesPreview(client *client.DatabricksClient) *WorkspacesPreviewAPI { + return &WorkspacesPreviewAPI{ + workspacesPreviewImpl: workspacesPreviewImpl{ + client: client, + }, + } +} + +// These APIs manage workspaces for this account. A Databricks workspace is an +// environment for accessing all of your Databricks assets. The workspace +// organizes objects (notebooks, libraries, and experiments) into folders, and +// provides access to data and computational resources such as clusters and +// jobs. +// +// These endpoints are available if your account is on the E2 version of the +// platform or on a select custom plan that allows multiple workspaces per +// account. +type WorkspacesPreviewAPI struct { + workspacesPreviewImpl +} + +// Delete a workspace. +// +// Terminates and deletes a Databricks workspace. From an API perspective, +// deletion is immediate. However, it might take a few minutes for all +// workspace resources to be deleted, depending on the size and number of +// workspace resources. +// +// This operation is available only if your account is on the E2 version of the +// platform or on a select custom plan that allows multiple workspaces per +// account. +func (a *WorkspacesPreviewAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error { + return a.workspacesPreviewImpl.Delete(ctx, DeleteWorkspaceRequest{ + WorkspaceId: workspaceId, + }) +} + +// Get a workspace. +// +// Gets information including status for a Databricks workspace, specified by +// ID. In the response, the `workspace_status` field indicates the current +// status. After initial workspace creation (which is asynchronous), make +// repeated `GET` requests with the workspace ID and check its status. The +// workspace becomes available when the status changes to `RUNNING`. +// +// For information about how to create a new workspace with this API **including +// error handling**, see [Create a new workspace using the Account API]. +// +// This operation is available only if your account is on the E2 version of the +// platform or on a select custom plan that allows multiple workspaces per +// account. +// +// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html +func (a *WorkspacesPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error) { + return a.workspacesPreviewImpl.Get(ctx, GetWorkspaceRequest{ + WorkspaceId: workspaceId, + }) +} + +// WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesPreviewAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. +// +// Returns an error if there's more than one [Workspace] with the same .WorkspaceName. +// +// Note: All [Workspace] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator.
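+// +// A minimal usage sketch (the workspace name is a hypothetical placeholder): +// +//	m, err := a.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx) +//	if err != nil { +//		return err +//	} +//	fmt.Printf("workspace ID: %d\n", m["main-workspace"])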
+func (a *WorkspacesPreviewAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]int64{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.WorkspaceName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .WorkspaceName: %s", key) + } + mapping[key] = v.WorkspaceId + } + return mapping, nil +} + +// GetByWorkspaceName calls [WorkspacesPreviewAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. +// +// Returns an error if there's more than one [Workspace] with the same .WorkspaceName. +// +// Note: All [Workspace] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *WorkspacesPreviewAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]Workspace{} + for _, v := range result { + key := v.WorkspaceName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Workspace named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Workspace named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} diff --git a/provisioning/v2preview/client.go b/provisioning/v2preview/client.go new file mode 100755 index 000000000..a70795a98 --- /dev/null +++ b/provisioning/v2preview/client.go @@ -0,0 +1,220 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
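+
+// Editorial usage sketch (not generated code): one way the account-level
+// clients in this file can be combined with the name-lookup helper from
+// api.go. The host, account ID, and token are placeholder values; it is
+// assumed that config.Config exposes Host, AccountID, and Token as in the
+// main SDK and that the generated WorkspacesPreviewInterface includes the
+// GetByWorkspaceName helper. Error handling is shortened for brevity.
+//
+//	cfg := &config.Config{
+//		Host:      "https://accounts.cloud.databricks.com",
+//		AccountID: "<account-id>",
+//		Token:     "<token>",
+//	}
+//	w, err := NewWorkspacesPreviewClient(cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	ws, err := w.GetByWorkspaceName(context.Background(), "my-workspace")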
+ +package provisioningpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" +) + +type CredentialsPreviewClient struct { + CredentialsPreviewInterface + + Config *config.Config +} + +func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &CredentialsPreviewClient{ + Config: cfg, + CredentialsPreviewInterface: NewCredentialsPreview(apiClient), + }, nil +} + +type EncryptionKeysPreviewClient struct { + EncryptionKeysPreviewInterface + + Config *config.Config +} + +func NewEncryptionKeysPreviewClient(cfg *config.Config) (*EncryptionKeysPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &EncryptionKeysPreviewClient{ + Config: cfg, + EncryptionKeysPreviewInterface: NewEncryptionKeysPreview(apiClient), + }, nil +} + +type NetworksPreviewClient struct { + NetworksPreviewInterface + + Config *config.Config +} + +func NewNetworksPreviewClient(cfg *config.Config) (*NetworksPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &NetworksPreviewClient{ + Config: cfg, + NetworksPreviewInterface: NewNetworksPreview(apiClient), + }, nil +} + +type PrivateAccessPreviewClient struct { + PrivateAccessPreviewInterface + + Config *config.Config +} + +func NewPrivateAccessPreviewClient(cfg *config.Config) (*PrivateAccessPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &PrivateAccessPreviewClient{ + Config: cfg, + PrivateAccessPreviewInterface: NewPrivateAccessPreview(apiClient), + }, nil +} + +type StoragePreviewClient struct { + StoragePreviewInterface + + Config *config.Config +} + +func NewStoragePreviewClient(cfg *config.Config) (*StoragePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + 
if err != nil { + return nil, err + } + + return &StoragePreviewClient{ + Config: cfg, + StoragePreviewInterface: NewStoragePreview(apiClient), + }, nil +} + +type VpcEndpointsPreviewClient struct { + VpcEndpointsPreviewInterface + + Config *config.Config +} + +func NewVpcEndpointsPreviewClient(cfg *config.Config) (*VpcEndpointsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &VpcEndpointsPreviewClient{ + Config: cfg, + VpcEndpointsPreviewInterface: NewVpcEndpointsPreview(apiClient), + }, nil +} + +type WorkspacesPreviewClient struct { + WorkspacesPreviewInterface + + Config *config.Config +} + +func NewWorkspacesPreviewClient(cfg *config.Config) (*WorkspacesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &WorkspacesPreviewClient{ + Config: cfg, + WorkspacesPreviewInterface: NewWorkspacesPreview(apiClient), + }, nil +} diff --git a/provisioning/v2preview/impl.go b/provisioning/v2preview/impl.go new file mode 100755 index 000000000..f8f9b1689 --- /dev/null +++ b/provisioning/v2preview/impl.go @@ -0,0 +1,355 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
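+
+// Editorial polling sketch (not generated code) for the workspace update flow
+// described in api.go: after Update issues the PATCH, poll Get until the
+// workspace returns to `RUNNING`. It assumes the generated Workspace model
+// exposes a WorkspaceStatus field with a WorkspaceStatusRunning constant,
+// matching the `workspace_status` field mentioned in the doc comments; the
+// 15-second interval and 40-minute deadline are illustrative choices, not
+// documented requirements.
+//
+//	if err := a.Update(ctx, req); err != nil {
+//		return err
+//	}
+//	deadline := time.Now().Add(40 * time.Minute)
+//	for time.Now().Before(deadline) {
+//		ws, err := a.Get(ctx, GetWorkspaceRequest{WorkspaceId: req.WorkspaceId})
+//		if err != nil {
+//			return err
+//		}
+//		if ws.WorkspaceStatus == WorkspaceStatusRunning {
+//			return nil
+//		}
+//		time.Sleep(15 * time.Second)
+//	}
+//	return fmt.Errorf("workspace %d did not reach RUNNING", req.WorkspaceId)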
+ +package provisioningpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" +) + +// unexported type that holds implementations of just CredentialsPreview API methods +type credentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *credentialsPreviewImpl) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) { + var credential Credential + path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &credential) + return &credential, err +} + +func (a *credentialsPreviewImpl) Delete(ctx context.Context, request DeleteCredentialRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *credentialsPreviewImpl) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error) { + var credential Credential + path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &credential) + return &credential, err +} + +func (a *credentialsPreviewImpl) List(ctx context.Context) ([]Credential, error) { + var credentialList []Credential + path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &credentialList) + return credentialList, err +} + +// unexported type that holds implementations of just EncryptionKeysPreview API methods +type encryptionKeysPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *encryptionKeysPreviewImpl) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) { + var customerManagedKey CustomerManagedKey + path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &customerManagedKey) + return &customerManagedKey, err +} + +func (a *encryptionKeysPreviewImpl) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a 
*encryptionKeysPreviewImpl) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) { + var customerManagedKey CustomerManagedKey + path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &customerManagedKey) + return &customerManagedKey, err +} + +func (a *encryptionKeysPreviewImpl) List(ctx context.Context) ([]CustomerManagedKey, error) { + var customerManagedKeyList []CustomerManagedKey + path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &customerManagedKeyList) + return customerManagedKeyList, err +} + +// unexported type that holds implementations of just NetworksPreview API methods +type networksPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *networksPreviewImpl) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) { + var network Network + path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &network) + return &network, err +} + +func (a *networksPreviewImpl) Delete(ctx context.Context, request DeleteNetworkRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *networksPreviewImpl) Get(ctx context.Context, request GetNetworkRequest) (*Network, error) { + var network Network + path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &network) + return &network, err +} + +func (a *networksPreviewImpl) List(ctx context.Context) ([]Network, error) { + var networkList []Network + path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &networkList) + return networkList, err +} + +// unexported type that holds implementations of just PrivateAccessPreview API methods +type privateAccessPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *privateAccessPreviewImpl) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) { + var privateAccessSettings PrivateAccessSettings + path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := 
make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &privateAccessSettings) + return &privateAccessSettings, err +} + +func (a *privateAccessPreviewImpl) Delete(ctx context.Context, request DeletePrivateAccesRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *privateAccessPreviewImpl) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) { + var privateAccessSettings PrivateAccessSettings + path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &privateAccessSettings) + return &privateAccessSettings, err +} + +func (a *privateAccessPreviewImpl) List(ctx context.Context) ([]PrivateAccessSettings, error) { + var privateAccessSettingsList []PrivateAccessSettings + path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &privateAccessSettingsList) + return privateAccessSettingsList, err +} + +func (a *privateAccessPreviewImpl) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error { + var replaceResponse ReplaceResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) + return err +} + +// unexported type that holds implementations of just StoragePreview API methods +type storagePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *storagePreviewImpl) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) { + var storageConfiguration StorageConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &storageConfiguration) + return &storageConfiguration, err +} + +func (a *storagePreviewImpl) Delete(ctx context.Context, request DeleteStorageRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = 
"application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *storagePreviewImpl) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) { + var storageConfiguration StorageConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &storageConfiguration) + return &storageConfiguration, err +} + +func (a *storagePreviewImpl) List(ctx context.Context) ([]StorageConfiguration, error) { + var storageConfigurationList []StorageConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &storageConfigurationList) + return storageConfigurationList, err +} + +// unexported type that holds implementations of just VpcEndpointsPreview API methods +type vpcEndpointsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *vpcEndpointsPreviewImpl) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) { + var vpcEndpoint VpcEndpoint + path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &vpcEndpoint) + return &vpcEndpoint, err +} + +func (a *vpcEndpointsPreviewImpl) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *vpcEndpointsPreviewImpl) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) { + var vpcEndpoint VpcEndpoint + path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &vpcEndpoint) + return &vpcEndpoint, err +} + +func (a *vpcEndpointsPreviewImpl) List(ctx context.Context) ([]VpcEndpoint, error) { + var vpcEndpointList []VpcEndpoint + path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &vpcEndpointList) + return vpcEndpointList, err +} + +// unexported type that holds implementations of just WorkspacesPreview API methods +type workspacesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *workspacesPreviewImpl) Create(ctx context.Context, request 
CreateWorkspaceRequest) (*Workspace, error) { + var workspace Workspace + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &workspace) + return &workspace, err +} + +func (a *workspacesPreviewImpl) Delete(ctx context.Context, request DeleteWorkspaceRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *workspacesPreviewImpl) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error) { + var workspace Workspace + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspace) + return &workspace, err +} + +func (a *workspacesPreviewImpl) List(ctx context.Context) ([]Workspace, error) { + var workspaceList []Workspace + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &workspaceList) + return workspaceList, err +} + +func (a *workspacesPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceRequest) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) + return err +} diff --git a/provisioning/v2preview/model.go b/provisioning/v2preview/model.go new file mode 100755 index 000000000..4271c5e11 --- /dev/null +++ b/provisioning/v2preview/model.go @@ -0,0 +1,1365 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provisioningpreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type AwsCredentials struct { + StsRole *StsRole `json:"sts_role,omitempty"` +} + +type AwsKeyInfo struct { + // The AWS KMS key alias. + KeyAlias string `json:"key_alias,omitempty"` + // The AWS KMS key's Amazon Resource Name (ARN). + KeyArn string `json:"key_arn"` + // The AWS KMS key region. + KeyRegion string `json:"key_region"` + // This field applies only if the `use_cases` property includes `STORAGE`. + // If this is set to `true` or omitted, the key is also used to encrypt + // cluster EBS volumes. If you do not want to use this key for encrypting + // EBS volumes, set to `false`. 
+ ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AwsKeyInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AwsKeyInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AzureWorkspaceInfo struct { + // Azure Resource Group name + ResourceGroup string `json:"resource_group,omitempty"` + // Azure Subscription ID + SubscriptionId string `json:"subscription_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AzureWorkspaceInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AzureWorkspaceInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The general workspace configurations that are specific to cloud providers. +type CloudResourceContainer struct { + // The general workspace configurations that are specific to Google Cloud. + Gcp *CustomerFacingGcpCloudResourceContainer `json:"gcp,omitempty"` +} + +type CreateAwsKeyInfo struct { + // The AWS KMS key alias. + KeyAlias string `json:"key_alias,omitempty"` + // The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS + // region is inferred from the ARN. + KeyArn string `json:"key_arn"` + // This field applies only if the `use_cases` property includes `STORAGE`. + // If this is set to `true` or omitted, the key is also used to encrypt + // cluster EBS volumes. If you do not want this key also used to encrypt EBS + // volumes, set this to `false`. + ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAwsKeyInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAwsKeyInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCredentialAwsCredentials struct { + StsRole *CreateCredentialStsRole `json:"sts_role,omitempty"` +} + +type CreateCredentialRequest struct { + AwsCredentials CreateCredentialAwsCredentials `json:"aws_credentials"` + // The human-readable name of the credential configuration object. + CredentialsName string `json:"credentials_name"` +} + +type CreateCredentialStsRole struct { + // The Amazon Resource Name (ARN) of the cross-account role. + RoleArn string `json:"role_arn,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateCredentialStsRole) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateCredentialStsRole) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateCustomerManagedKeyRequest struct { + AwsKeyInfo *CreateAwsKeyInfo `json:"aws_key_info,omitempty"` + + GcpKeyInfo *CreateGcpKeyInfo `json:"gcp_key_info,omitempty"` + // The cases that the key can be used for. + UseCases []KeyUseCase `json:"use_cases"` +} + +type CreateGcpKeyInfo struct { + // The GCP KMS key's resource name + KmsKeyId string `json:"kms_key_id"` +} + +type CreateNetworkRequest struct { + // The Google Cloud specific information for this network (for example, the + // VPC ID, subnet ID, and secondary IP ranges). + GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"` + // The human-readable name of the network configuration. + NetworkName string `json:"network_name"` + // IDs of one to five security groups associated with this network. Security + // group IDs **cannot** be used in multiple network configurations.
+ SecurityGroupIds []string `json:"security_group_ids,omitempty"` + // IDs of at least two subnets associated with this network. Subnet IDs + // **cannot** be used in multiple network configurations. + SubnetIds []string `json:"subnet_ids,omitempty"` + // If specified, contains the VPC endpoints used to allow cluster + // communication from this VPC over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"` + // The ID of the VPC associated with this network. VPC IDs can be used in + // multiple network configurations. + VpcId string `json:"vpc_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateNetworkRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateNetworkRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateStorageConfigurationRequest struct { + // Root S3 bucket information. + RootBucketInfo RootBucketInfo `json:"root_bucket_info"` + // The human-readable name of the storage configuration. + StorageConfigurationName string `json:"storage_configuration_name"` +} + +type CreateVpcEndpointRequest struct { + // The ID of the VPC endpoint object in AWS. + AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"` + // The Google Cloud specific information for this Private Service Connect + // endpoint. + GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"` + // The AWS region in which this VPC endpoint object exists. + Region string `json:"region,omitempty"` + // The human-readable name of the VPC endpoint configuration. + VpcEndpointName string `json:"vpc_endpoint_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateVpcEndpointRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateVpcEndpointRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateWorkspaceRequest struct { + // The AWS region of the workspace's data plane. + AwsRegion string `json:"aws_region,omitempty"` + // The cloud provider which the workspace uses. For Google Cloud workspaces, + // always set this field to `gcp`. + Cloud string `json:"cloud,omitempty"` + // The general workspace configurations that are specific to cloud + // providers. + CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"` + // ID of the workspace's credential configuration object. + CredentialsId string `json:"credentials_id,omitempty"` + // The custom tags key-value pairing that is attached to this workspace. The + // key-value pair is a string of utf-8 characters. The value can be an empty + // string, with maximum length of 255 characters. The key can be of maximum + // length of 127 characters, and cannot be empty. + CustomTags map[string]string `json:"custom_tags,omitempty"` + // The deployment name defines part of the subdomain for the workspace. The + // workspace URL for the web application and REST APIs is + // `<workspace-deployment-name>.cloud.databricks.com`. For example, if the + // deployment name is `abcsales`, your workspace URL will be + // `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This + // property supports only the set of characters that are allowed in a + // subdomain. + // + // To set this value, you must have a deployment name prefix. Contact your + // Databricks account team to add an account deployment name prefix to your + // account.
+ // + // Workspace deployment names follow the account prefix and a hyphen. For + // example, if your account's deployment prefix is `acme` and the workspace + // deployment name is `workspace-1`, the JSON response for the + // `deployment_name` field becomes `acme-workspace-1`. The workspace URL + // would be `acme-workspace-1.cloud.databricks.com`. + // + // You can also set the `deployment_name` to the reserved keyword `EMPTY` if + // you want the deployment name to only include the deployment prefix. For + // example, if your account's deployment prefix is `acme` and the workspace + // deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and + // the workspace URL is `acme.cloud.databricks.com`. + // + // This value must be unique across all non-deleted deployments across all + // AWS regions. + // + // If a new workspace omits this property, the server generates a unique + // deployment name for you with the pattern `dbc-xxxxxxxx-xxxx`. + DeploymentName string `json:"deployment_name,omitempty"` + // The network settings for the workspace. The configurations are only for + // Databricks-managed VPCs. It is ignored if you specify a customer-managed + // VPC in the `network_id` field. All the IP range configurations must be + // mutually exclusive. An attempt to create a workspace fails if Databricks + // detects an IP range overlap. + // + // Specify custom IP ranges in CIDR format. The IP ranges for these fields + // must not overlap, and all IP addresses must be entirely within the + // following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, + // `192.168.0.0/16`, and `240.0.0.0/4`. + // + // The sizes of these IP ranges affect the maximum number of nodes for the + // workspace. + // + // **Important**: Confirm the IP ranges used by your Databricks workspace + // before creating the workspace. You cannot change them after your + // workspace is deployed. If the IP address ranges for your Databricks + // workspace are too small, IP exhaustion can occur, causing your Databricks + // jobs to fail. To determine the address range sizes that you need, + // Databricks provides a calculator as a Microsoft Excel spreadsheet. See + // [calculate subnet sizes for a new workspace]. + // + // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html + GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"` + // The configurations for the GKE cluster of a Databricks workspace. + GkeConfig *GkeConfig `json:"gke_config,omitempty"` + // Whether no public IP is enabled for the workspace. + IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` + // The Google Cloud region of the workspace data plane in your Google + // account. For example, `us-east4`. + Location string `json:"location,omitempty"` + // The ID of the workspace's managed services encryption key configuration + // object. This is used to help protect and control access to the + // workspace's notebooks, secrets, Databricks SQL queries, and query + // history. The provided key configuration object property `use_cases` must + // contain `MANAGED_SERVICES`. + ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"` + + NetworkId string `json:"network_id,omitempty"` + // The pricing tier of the workspace. For pricing tier information, see [AWS + // Pricing].
+ // + // [AWS Pricing]: https://databricks.com/product/aws-pricing + PricingTier PricingTier `json:"pricing_tier,omitempty"` + // ID of the workspace's private access settings object. Only used for + // PrivateLink. This ID must be specified for customers using [AWS + // PrivateLink] for either front-end (user-to-workspace connection), + // back-end (data plane to control plane connection), or both connection + // types. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"` + // The ID of the workspace's storage configuration object. + StorageConfigurationId string `json:"storage_configuration_id,omitempty"` + // The ID of the workspace's storage encryption key configuration object. + // This is used to encrypt the workspace's root S3 bucket (root DBFS and + // system data) and, optionally, cluster EBS volumes. The provided key + // configuration object property `use_cases` must contain `STORAGE`. + StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"` + // The workspace's human-readable name. + WorkspaceName string `json:"workspace_name"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateWorkspaceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateWorkspaceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Credential struct { + // The Databricks account ID that hosts the credential. + AccountId string `json:"account_id,omitempty"` + + AwsCredentials *AwsCredentials `json:"aws_credentials,omitempty"` + // Time in epoch milliseconds when the credential was created. + CreationTime int64 `json:"creation_time,omitempty"` + // Databricks credential configuration ID. + CredentialsId string `json:"credentials_id,omitempty"` + // The human-readable name of the credential configuration object. + CredentialsName string `json:"credentials_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Credential) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Credential) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The custom tags key-value pairing that is attached to this workspace. The +// key-value pair is a string of utf-8 characters. The value can be an empty +// string, with maximum length of 255 characters. The key can be of maximum +// length of 127 characters, and cannot be empty. +type CustomTags map[string]string + +// The general workspace configurations that are specific to Google Cloud. +type CustomerFacingGcpCloudResourceContainer struct { + // The Google Cloud project ID, which the workspace uses to instantiate + // cloud resources for your workspace. + ProjectId string `json:"project_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CustomerFacingGcpCloudResourceContainer) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CustomerFacingGcpCloudResourceContainer) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CustomerManagedKey struct { + // The Databricks account ID that holds the customer-managed key.
+ AccountId string `json:"account_id,omitempty"` + + AwsKeyInfo *AwsKeyInfo `json:"aws_key_info,omitempty"` + // Time in epoch milliseconds when the customer key was created. + CreationTime int64 `json:"creation_time,omitempty"` + // ID of the encryption key configuration object. + CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` + + GcpKeyInfo *GcpKeyInfo `json:"gcp_key_info,omitempty"` + // The cases that the key can be used for. + UseCases []KeyUseCase `json:"use_cases,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CustomerManagedKey) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CustomerManagedKey) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete credential configuration +type DeleteCredentialRequest struct { + // Databricks Account API credential configuration ID + CredentialsId string `json:"-" url:"-"` +} + +// Delete encryption key configuration +type DeleteEncryptionKeyRequest struct { + // Databricks encryption key configuration ID. + CustomerManagedKeyId string `json:"-" url:"-"` +} + +// Delete a network configuration +type DeleteNetworkRequest struct { + // Databricks Account API network configuration ID. + NetworkId string `json:"-" url:"-"` +} + +// Delete a private access settings object +type DeletePrivateAccesRequest struct { + // Databricks Account API private access settings ID. + PrivateAccessSettingsId string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete storage configuration +type DeleteStorageRequest struct { + // Databricks Account API storage configuration ID. + StorageConfigurationId string `json:"-" url:"-"` +} + +// Delete VPC endpoint configuration +type DeleteVpcEndpointRequest struct { + // Databricks VPC endpoint ID. + VpcEndpointId string `json:"-" url:"-"` +} + +// Delete a workspace +type DeleteWorkspaceRequest struct { + // Workspace ID. + WorkspaceId int64 `json:"-" url:"-"` +} + +// This enumeration represents the type of Databricks VPC [endpoint service] +// that was used when creating this VPC endpoint. +// +// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html +type EndpointUseCase string + +const EndpointUseCaseDataplaneRelayAccess EndpointUseCase = `DATAPLANE_RELAY_ACCESS` + +const EndpointUseCaseWorkspaceAccess EndpointUseCase = `WORKSPACE_ACCESS` + +// String representation for [fmt.Print] +func (f *EndpointUseCase) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointUseCase) Set(v string) error { + switch v { + case `DATAPLANE_RELAY_ACCESS`, `WORKSPACE_ACCESS`: + *f = EndpointUseCase(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATAPLANE_RELAY_ACCESS", "WORKSPACE_ACCESS"`, v) + } +} + +// Type always returns EndpointUseCase to satisfy [pflag.Value] interface +func (f *EndpointUseCase) Type() string { + return "EndpointUseCase" +} + +// The AWS resource associated with this error: credentials, VPC, subnet, +// security group, or network ACL. 
+type ErrorType string + +const ErrorTypeCredentials ErrorType = `credentials` + +const ErrorTypeNetworkAcl ErrorType = `networkAcl` + +const ErrorTypeSecurityGroup ErrorType = `securityGroup` + +const ErrorTypeSubnet ErrorType = `subnet` + +const ErrorTypeVpc ErrorType = `vpc` + +// String representation for [fmt.Print] +func (f *ErrorType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ErrorType) Set(v string) error { + switch v { + case `credentials`, `networkAcl`, `securityGroup`, `subnet`, `vpc`: + *f = ErrorType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "credentials", "networkAcl", "securityGroup", "subnet", "vpc"`, v) + } +} + +// Type always returns ErrorType to satisfy [pflag.Value] interface +func (f *ErrorType) Type() string { + return "ErrorType" +} + +type ExternalCustomerInfo struct { + // Email of the authoritative user. + AuthoritativeUserEmail string `json:"authoritative_user_email,omitempty"` + // The authoritative user full name. + AuthoritativeUserFullName string `json:"authoritative_user_full_name,omitempty"` + // The legal entity name for the external workspace. + CustomerName string `json:"customer_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalCustomerInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalCustomerInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GcpKeyInfo struct { + // The GCP KMS key's resource name + KmsKeyId string `json:"kms_key_id"` +} + +// The network settings for the workspace. The configurations are only for +// Databricks-managed VPCs. It is ignored if you specify a customer-managed VPC +// in the `network_id` field. All the IP range configurations must be mutually +// exclusive. An attempt to create a workspace fails if Databricks detects an IP +// range overlap. +// +// Specify custom IP ranges in CIDR format. The IP ranges for these fields must +// not overlap, and all IP addresses must be entirely within the following +// ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, `192.168.0.0/16`, and +// `240.0.0.0/4`. +// +// The sizes of these IP ranges affect the maximum number of nodes for the +// workspace. +// +// **Important**: Confirm the IP ranges used by your Databricks workspace before +// creating the workspace. You cannot change them after your workspace is +// deployed. If the IP address ranges for your Databricks workspace are too +// small, IP exhaustion can occur, causing your Databricks jobs to fail. To +// determine the address range sizes that you need, Databricks provides a +// calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes for +// a new workspace]. +// +// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html +type GcpManagedNetworkConfig struct { + // The IP range from which to allocate GKE cluster pods. No bigger than `/9` + // and no smaller than `/21`. + GkeClusterPodIpRange string `json:"gke_cluster_pod_ip_range,omitempty"` + // The IP range from which to allocate GKE cluster services. No bigger than + // `/16` and no smaller than `/27`. + GkeClusterServiceIpRange string `json:"gke_cluster_service_ip_range,omitempty"` + // The IP range from which to allocate GKE cluster nodes. No bigger than + // `/9` and no smaller than `/29`.
+ SubnetCidr string `json:"subnet_cidr,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpManagedNetworkConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpManagedNetworkConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The Google Cloud specific information for this network (for example, the VPC +// ID, subnet ID, and secondary IP ranges). +type GcpNetworkInfo struct { + // The Google Cloud project ID of the VPC network. + NetworkProjectId string `json:"network_project_id"` + // The name of the secondary IP range for pods. A Databricks-managed GKE + // cluster uses this IP range for its pods. This secondary IP range can be + // used by only one workspace. + PodIpRangeName string `json:"pod_ip_range_name"` + // The name of the secondary IP range for services. A Databricks-managed GKE + // cluster uses this IP range for its services. This secondary IP range can + // be used by only one workspace. + ServiceIpRangeName string `json:"service_ip_range_name"` + // The ID of the subnet associated with this network. + SubnetId string `json:"subnet_id"` + // The Google Cloud region of the workspace data plane (for example, + // `us-east4`). + SubnetRegion string `json:"subnet_region"` + // The ID of the VPC associated with this network. VPC IDs can be used in + // multiple network configurations. + VpcId string `json:"vpc_id"` +} + +// The Google Cloud specific information for this Private Service Connect +// endpoint. +type GcpVpcEndpointInfo struct { + // Region of the PSC endpoint. + EndpointRegion string `json:"endpoint_region"` + // The Google Cloud project ID of the VPC network where the PSC connection + // resides. + ProjectId string `json:"project_id"` + // The unique ID of this PSC connection. + PscConnectionId string `json:"psc_connection_id,omitempty"` + // The name of the PSC endpoint in the Google Cloud project. + PscEndpointName string `json:"psc_endpoint_name"` + // The service attachment this PSC connection connects to. + ServiceAttachmentId string `json:"service_attachment_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GcpVpcEndpointInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GcpVpcEndpointInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get credential configuration +type GetCredentialRequest struct { + // Databricks Account API credential configuration ID + CredentialsId string `json:"-" url:"-"` +} + +// Get encryption key configuration +type GetEncryptionKeyRequest struct { + // Databricks encryption key configuration ID. + CustomerManagedKeyId string `json:"-" url:"-"` +} + +// Get a network configuration +type GetNetworkRequest struct { + // Databricks Account API network configuration ID. + NetworkId string `json:"-" url:"-"` +} + +// Get a private access settings object +type GetPrivateAccesRequest struct { + // Databricks Account API private access settings ID. + PrivateAccessSettingsId string `json:"-" url:"-"` +} + +// Get storage configuration +type GetStorageRequest struct { + // Databricks Account API storage configuration ID. + StorageConfigurationId string `json:"-" url:"-"` +} + +// Get a VPC endpoint configuration +type GetVpcEndpointRequest struct { + // Databricks VPC endpoint ID. + VpcEndpointId string `json:"-" url:"-"` +} + +// Get a workspace +type GetWorkspaceRequest struct { + // Workspace ID. 
+ WorkspaceId int64 `json:"-" url:"-"` +} + +// The configurations for the GKE cluster of a Databricks workspace. +type GkeConfig struct { + // Specifies the network connectivity types for the GKE nodes and the GKE + // master network. + // + // Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the + // workspace. The GKE nodes will not have public IPs. + // + // Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of + // a public GKE cluster have public IP addresses. + ConnectivityType GkeConfigConnectivityType `json:"connectivity_type,omitempty"` + // The IP range from which to allocate GKE cluster master resources. This + // field will be ignored if GKE private cluster is not enabled. + // + // It must be exactly as big as `/28`. + MasterIpRange string `json:"master_ip_range,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GkeConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GkeConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Specifies the network connectivity types for the GKE nodes and the GKE master +// network. +// +// Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the +// workspace. The GKE nodes will not have public IPs. +// +// Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a +// public GKE cluster have public IP addresses. +type GkeConfigConnectivityType string + +const GkeConfigConnectivityTypePrivateNodePublicMaster GkeConfigConnectivityType = `PRIVATE_NODE_PUBLIC_MASTER` + +const GkeConfigConnectivityTypePublicNodePublicMaster GkeConfigConnectivityType = `PUBLIC_NODE_PUBLIC_MASTER` + +// String representation for [fmt.Print] +func (f *GkeConfigConnectivityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GkeConfigConnectivityType) Set(v string) error { + switch v { + case `PRIVATE_NODE_PUBLIC_MASTER`, `PUBLIC_NODE_PUBLIC_MASTER`: + *f = GkeConfigConnectivityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PRIVATE_NODE_PUBLIC_MASTER", "PUBLIC_NODE_PUBLIC_MASTER"`, v) + } +} + +// Type always returns GkeConfigConnectivityType to satisfy [pflag.Value] interface +func (f *GkeConfigConnectivityType) Type() string { + return "GkeConfigConnectivityType" +} + +// Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data +// in the control plane * `STORAGE`: Encrypts the workspace's root S3 bucket +// (root DBFS and system data) and, optionally, cluster EBS volumes. +type KeyUseCase string + +// Encrypts notebook and secret data in the control plane +const KeyUseCaseManagedServices KeyUseCase = `MANAGED_SERVICES` + +// Encrypts the workspace's root S3 bucket (root DBFS and system data) and, +// optionally, cluster EBS volumes. +const KeyUseCaseStorage KeyUseCase = `STORAGE` + +// String representation for [fmt.Print] +func (f *KeyUseCase) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *KeyUseCase) Set(v string) error { + switch v { + case `MANAGED_SERVICES`, `STORAGE`: + *f = KeyUseCase(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANAGED_SERVICES", "STORAGE"`, v) + } +} + +// Type always returns KeyUseCase to satisfy [pflag.Value] interface +func (f *KeyUseCase) Type() string { + return "KeyUseCase" +} + +type Network struct { + // The Databricks account ID associated with this network configuration. 
+ AccountId string `json:"account_id,omitempty"` + // Time in epoch milliseconds when the network was created. + CreationTime int64 `json:"creation_time,omitempty"` + // Array of error messages about the network configuration. + ErrorMessages []NetworkHealth `json:"error_messages,omitempty"` + // The Google Cloud specific information for this network (for example, the + // VPC ID, subnet ID, and secondary IP ranges). + GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"` + // The Databricks network configuration ID. + NetworkId string `json:"network_id,omitempty"` + // The human-readable name of the network configuration. + NetworkName string `json:"network_name,omitempty"` + + SecurityGroupIds []string `json:"security_group_ids,omitempty"` + + SubnetIds []string `json:"subnet_ids,omitempty"` + // If specified, contains the VPC endpoints used to allow cluster + // communication from this VPC over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"` + // The ID of the VPC associated with this network configuration. VPC IDs can + // be used in multiple networks. + VpcId string `json:"vpc_id,omitempty"` + // The status of this network configuration object in terms of its use in a + // workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: + // Broken. * `WARNED`: Warned. + VpcStatus VpcStatus `json:"vpc_status,omitempty"` + // Array of warning messages about the network configuration. + WarningMessages []NetworkWarning `json:"warning_messages,omitempty"` + // Workspace ID associated with this network configuration. + WorkspaceId int64 `json:"workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Network) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Network) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type NetworkHealth struct { + // Details of the error. + ErrorMessage string `json:"error_message,omitempty"` + // The AWS resource associated with this error: credentials, VPC, subnet, + // security group, or network ACL. + ErrorType ErrorType `json:"error_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NetworkHealth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NetworkHealth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// If specified, contains the VPC endpoints used to allow cluster communication +// from this VPC over [AWS PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink/ +type NetworkVpcEndpoints struct { + // The VPC endpoint ID used by this network to access the Databricks secure + // cluster connectivity relay. + DataplaneRelay []string `json:"dataplane_relay"` + // The VPC endpoint ID used by this network to access the Databricks REST + // API. + RestApi []string `json:"rest_api"` +} + +type NetworkWarning struct { + // Details of the warning. + WarningMessage string `json:"warning_message,omitempty"` + // The AWS resource associated with this warning: a subnet or a security + // group. + WarningType WarningType `json:"warning_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NetworkWarning) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NetworkWarning) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The pricing tier of the workspace. For pricing tier information, see [AWS +// Pricing]. 
+// +// [AWS Pricing]: https://databricks.com/product/aws-pricing +type PricingTier string + +const PricingTierCommunityEdition PricingTier = `COMMUNITY_EDITION` + +const PricingTierDedicated PricingTier = `DEDICATED` + +const PricingTierEnterprise PricingTier = `ENTERPRISE` + +const PricingTierPremium PricingTier = `PREMIUM` + +const PricingTierStandard PricingTier = `STANDARD` + +const PricingTierUnknown PricingTier = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *PricingTier) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PricingTier) Set(v string) error { + switch v { + case `COMMUNITY_EDITION`, `DEDICATED`, `ENTERPRISE`, `PREMIUM`, `STANDARD`, `UNKNOWN`: + *f = PricingTier(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COMMUNITY_EDITION", "DEDICATED", "ENTERPRISE", "PREMIUM", "STANDARD", "UNKNOWN"`, v) + } +} + +// Type always returns PricingTier to satisfy [pflag.Value] interface +func (f *PricingTier) Type() string { + return "PricingTier" +} + +// The private access level controls which VPC endpoints can connect to the UI +// or API of any workspace that attaches this private access settings object. * +// `ACCOUNT` level access (the default) allows only VPC endpoints that are +// registered in your Databricks account to connect to your workspace. * +// `ENDPOINT` level access allows only specified VPC endpoints to connect to +// your workspace. For details, see `allowed_vpc_endpoint_ids`. +type PrivateAccessLevel string + +const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT` + +const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT` + +// String representation for [fmt.Print] +func (f *PrivateAccessLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PrivateAccessLevel) Set(v string) error { + switch v { + case `ACCOUNT`, `ENDPOINT`: + *f = PrivateAccessLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCOUNT", "ENDPOINT"`, v) + } +} + +// Type always returns PrivateAccessLevel to satisfy [pflag.Value] interface +func (f *PrivateAccessLevel) Type() string { + return "PrivateAccessLevel" +} + +type PrivateAccessSettings struct { + // The Databricks account ID that hosts the private access settings object. + AccountId string `json:"account_id,omitempty"` + // An array of Databricks VPC endpoint IDs. + AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"` + // The private access level controls which VPC endpoints can connect to the + // UI or API of any workspace that attaches this private access settings + // object. * `ACCOUNT` level access (the default) allows only VPC endpoints + // that are registered in your Databricks account to connect to your + // workspace. * `ENDPOINT` level access allows only specified VPC endpoints + // to connect to your workspace. For details, see + // `allowed_vpc_endpoint_ids`. + PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"` + // Databricks private access settings ID. + PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"` + // The human-readable name of the private access settings object. + PrivateAccessSettingsName string `json:"private_access_settings_name,omitempty"` + // Determines if the workspace can be accessed over the public internet.
+ // fully private workspaces, you can optionally specify `false`, but only if
+ // you implement both the front-end and the back-end PrivateLink
+ // connections. Otherwise, specify `true`, which means that public access is
+ // enabled.
+ PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
+ // The cloud region for workspaces attached to this private access settings
+ // object.
+ Region string `json:"region,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PrivateAccessSettings) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PrivateAccessSettings) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ReplaceResponse struct {
+}
+
+// Root S3 bucket information.
+type RootBucketInfo struct {
+ // The name of the S3 bucket.
+ BucketName string `json:"bucket_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RootBucketInfo) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s RootBucketInfo) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type StorageConfiguration struct {
+ // The Databricks account ID that hosts the credential.
+ AccountId string `json:"account_id,omitempty"`
+ // Time in epoch milliseconds when the storage configuration was created.
+ CreationTime int64 `json:"creation_time,omitempty"`
+ // Root S3 bucket information.
+ RootBucketInfo *RootBucketInfo `json:"root_bucket_info,omitempty"`
+ // Databricks storage configuration ID.
+ StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
+ // The human-readable name of the storage configuration.
+ StorageConfigurationName string `json:"storage_configuration_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *StorageConfiguration) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s StorageConfiguration) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type StsRole struct {
+ // The external ID that needs to be trusted by the cross-account role. This
+ // is always your Databricks account ID.
+ ExternalId string `json:"external_id,omitempty"`
+ // The Amazon Resource Name (ARN) of the cross-account role.
+ RoleArn string `json:"role_arn,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *StsRole) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s StsRole) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpdateResponse struct {
+}
+
+type UpdateWorkspaceRequest struct {
+ // The AWS region of the workspace's data plane (for example, `us-west-2`).
+ // This parameter is available only for updating failed workspaces.
+ AwsRegion string `json:"aws_region,omitempty"`
+ // ID of the workspace's credential configuration object. This parameter is
+ // available for updating both failed and running workspaces.
+ CredentialsId string `json:"credentials_id,omitempty"`
+ // The custom tags key-value pairing that is attached to this workspace. The
+ // key-value pair is a string of utf-8 characters. The value can be an empty
+ // string, with maximum length of 255 characters. The key can be of maximum
+ // length of 127 characters, and cannot be empty.
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // The ID of the workspace's managed services encryption key configuration
+ // object. This parameter is available only for updating failed workspaces.
+ ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
+
+ NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
+ // The ID of the workspace's network configuration object. Used only if you
+ // already use a customer-managed VPC. For failed workspaces only, you can
+ // switch from a Databricks-managed VPC to a customer-managed VPC by
+ // updating the workspace to add a network configuration ID.
+ NetworkId string `json:"network_id,omitempty"`
+ // The ID of the workspace's private access settings configuration object.
+ // This parameter is available only for updating failed workspaces.
+ PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
+ // The ID of the workspace's storage configuration object. This parameter is
+ // available only for updating failed workspaces.
+ StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
+ // The ID of the key configuration object for workspace storage. This
+ // parameter is available for updating both failed and running workspaces.
+ StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
+ // Workspace ID.
+ WorkspaceId int64 `json:"-" url:"-"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpdateWorkspaceRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpdateWorkspaceRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type UpsertPrivateAccessSettingsRequest struct {
+ // An array of Databricks VPC endpoint IDs. This is the Databricks ID that
+ // is returned when registering the VPC endpoint configuration in your
+ // Databricks account. This is not the ID of the VPC endpoint in AWS.
+ //
+ // Only used when `private_access_level` is set to `ENDPOINT`. This is an
+ // allow list of VPC endpoints in your account that can connect to your
+ // workspace over AWS PrivateLink.
+ //
+ // If hybrid access to your workspace is enabled by setting
+ // `public_access_enabled` to `true`, this control only works for
+ // PrivateLink connections. To control how your workspace is accessed via
+ // the public internet, see [IP access lists].
+ //
+ // [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
+ AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
+ // The private access level controls which VPC endpoints can connect to the
+ // UI or API of any workspace that attaches this private access settings
+ // object. * `ACCOUNT` level access (the default) allows only VPC endpoints
+ // that are registered in your Databricks account to connect to your
+ // workspace. * `ENDPOINT` level access allows only specified VPC endpoints
+ // to connect to your workspace. For details, see
+ // `allowed_vpc_endpoint_ids`.
+ PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
+ // Databricks Account API private access settings ID.
+ PrivateAccessSettingsId string `json:"-" url:"-"`
+ // The human-readable name of the private access settings object.
+ PrivateAccessSettingsName string `json:"private_access_settings_name"`
+ // Determines if the workspace can be accessed over the public internet. For
+ // fully private workspaces, you can optionally specify `false`, but only if
+ // you implement both the front-end and the back-end PrivateLink
+ // connections. Otherwise, specify `true`, which means that public access is
+ // enabled.
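+ //
+ // A hedged construction sketch for this request (not from the source; the
+ // name, region, and endpoint ID below are hypothetical placeholders):
+ //
+ //	req := UpsertPrivateAccessSettingsRequest{
+ //		PrivateAccessSettingsName: "my-private-access-settings",
+ //		Region:                    "us-west-2",
+ //		PrivateAccessLevel:        PrivateAccessLevelEndpoint,
+ //		AllowedVpcEndpointIds:     []string{"vpce-example"},
+ //	}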
+ PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
+ // The cloud region for workspaces associated with this private access
+ // settings object.
+ Region string `json:"region"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *UpsertPrivateAccessSettingsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s UpsertPrivateAccessSettingsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type VpcEndpoint struct {
+ // The Databricks account ID that hosts the VPC endpoint configuration.
+ AccountId string `json:"account_id,omitempty"`
+ // The AWS account in which the VPC endpoint object exists.
+ AwsAccountId string `json:"aws_account_id,omitempty"`
+ // The ID of the Databricks [endpoint service] that this VPC endpoint is
+ // connected to. For a list of endpoint service IDs for each supported AWS
+ // region, see the [Databricks PrivateLink documentation].
+ //
+ // [Databricks PrivateLink documentation]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
+ AwsEndpointServiceId string `json:"aws_endpoint_service_id,omitempty"`
+ // The ID of the VPC endpoint object in AWS.
+ AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"`
+ // The Google Cloud specific information for this Private Service Connect
+ // endpoint.
+ GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"`
+ // The AWS region in which this VPC endpoint object exists.
+ Region string `json:"region,omitempty"`
+ // The current state (such as `available` or `rejected`) of the VPC
+ // endpoint. Derived from AWS. For the full set of values, see [AWS
+ // DescribeVpcEndpoint documentation].
+ //
+ // [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html
+ State string `json:"state,omitempty"`
+ // This enumeration represents the type of Databricks VPC [endpoint service]
+ // that was used when creating this VPC endpoint.
+ //
+ // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
+ UseCase EndpointUseCase `json:"use_case,omitempty"`
+ // Databricks VPC endpoint ID. This is the Databricks-specific name of the
+ // VPC endpoint. Do not confuse this with the `aws_vpc_endpoint_id`, which
+ // is the ID within AWS of the VPC endpoint.
+ VpcEndpointId string `json:"vpc_endpoint_id,omitempty"`
+ // The human-readable name of the VPC endpoint configuration.
+ VpcEndpointName string `json:"vpc_endpoint_name,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *VpcEndpoint) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s VpcEndpoint) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The status of this network configuration object in terms of its use in a
+// workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken.
+// * `WARNED`: Warned.
+type VpcStatus string
+
+// Broken.
+const VpcStatusBroken VpcStatus = `BROKEN`
+
+// Unattached.
+const VpcStatusUnattached VpcStatus = `UNATTACHED`
+
+// Valid.
+const VpcStatusValid VpcStatus = `VALID`
+
+// Warned.
+const VpcStatusWarned VpcStatus = `WARNED`
+
+// String representation for [fmt.Print]
+func (f *VpcStatus) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *VpcStatus) Set(v string) error {
+ switch v {
+ case `BROKEN`, `UNATTACHED`, `VALID`, `WARNED`:
+ *f = VpcStatus(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "BROKEN", "UNATTACHED", "VALID", "WARNED"`, v)
+ }
+}
+
+// Type always returns VpcStatus to satisfy [pflag.Value] interface
+func (f *VpcStatus) Type() string {
+ return "VpcStatus"
+}
+
+// The AWS resource associated with this warning: a subnet or a security group.
+type WarningType string
+
+const WarningTypeSecurityGroup WarningType = `securityGroup`
+
+const WarningTypeSubnet WarningType = `subnet`
+
+// String representation for [fmt.Print]
+func (f *WarningType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *WarningType) Set(v string) error {
+ switch v {
+ case `securityGroup`, `subnet`:
+ *f = WarningType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "securityGroup", "subnet"`, v)
+ }
+}
+
+// Type always returns WarningType to satisfy [pflag.Value] interface
+func (f *WarningType) Type() string {
+ return "WarningType"
+}
+
+type Workspace struct {
+ // Databricks account ID.
+ AccountId string `json:"account_id,omitempty"`
+ // The AWS region of the workspace data plane (for example, `us-west-2`).
+ AwsRegion string `json:"aws_region,omitempty"`
+
+ AzureWorkspaceInfo *AzureWorkspaceInfo `json:"azure_workspace_info,omitempty"`
+ // The cloud name. This field always has the value `gcp`.
+ Cloud string `json:"cloud,omitempty"`
+ // The general workspace configurations that are specific to cloud
+ // providers.
+ CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"`
+ // Time in epoch milliseconds when the workspace was created.
+ CreationTime int64 `json:"creation_time,omitempty"`
+ // ID of the workspace's credential configuration object.
+ CredentialsId string `json:"credentials_id,omitempty"`
+ // The custom tags key-value pairing that is attached to this workspace. The
+ // key-value pair is a string of utf-8 characters. The value can be an empty
+ // string, with maximum length of 255 characters. The key can be of maximum
+ // length of 127 characters, and cannot be empty.
+ CustomTags map[string]string `json:"custom_tags,omitempty"`
+ // The deployment name defines part of the subdomain for the workspace. The
+ // workspace URL for the web application and REST APIs is
+ // `<deployment-name>.cloud.databricks.com`.
+ //
+ // This value must be unique across all non-deleted deployments across all
+ // AWS regions.
+ DeploymentName string `json:"deployment_name,omitempty"`
+ // If this workspace is for an external customer, then
+ // external_customer_info is populated. If this workspace is not for an
+ // external customer, then external_customer_info is empty.
+ ExternalCustomerInfo *ExternalCustomerInfo `json:"external_customer_info,omitempty"`
+ // The network settings for the workspace. The configurations are only for
+ // Databricks-managed VPCs. It is ignored if you specify a customer-managed
+ // VPC in the `network_id` field. All the IP range configurations must be
+ // mutually exclusive. An attempt to create a workspace fails if Databricks
+ // detects an IP range overlap.
+ //
+ // Specify custom IP ranges in CIDR format. The IP ranges for these fields
+ // must not overlap, and all IP addresses must be entirely within the
+ // following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`,
+ // `192.168.0.0/16`, and `240.0.0.0/4`.
+ //
+ // The sizes of these IP ranges affect the maximum number of nodes for the
+ // workspace.
+ //
+ // **Important**: Confirm the IP ranges used by your Databricks workspace
+ // before creating the workspace. You cannot change them after your
+ // workspace is deployed. If the IP address ranges for your Databricks
+ // workspace are too small, IP exhaustion can occur, causing your Databricks
+ // jobs to fail. To determine the address range sizes that you need,
+ // Databricks provides a calculator as a Microsoft Excel spreadsheet. See
+ // [calculate subnet sizes for a new workspace].
+ //
+ // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
+ GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`
+ // The configurations for the GKE cluster of a Databricks workspace.
+ GkeConfig *GkeConfig `json:"gke_config,omitempty"`
+ // Whether no public IP is enabled for the workspace.
+ IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"`
+ // The Google Cloud region of the workspace data plane in your Google
+ // account (for example, `us-east4`).
+ Location string `json:"location,omitempty"`
+ // ID of the key configuration for encrypting managed services.
+ ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
+ // The network configuration ID that is attached to the workspace. This
+ // field is available only if the network is a customer-managed network.
+ NetworkId string `json:"network_id,omitempty"`
+ // The pricing tier of the workspace. For pricing tier information, see [AWS
+ // Pricing].
+ //
+ // [AWS Pricing]: https://databricks.com/product/aws-pricing
+ PricingTier PricingTier `json:"pricing_tier,omitempty"`
+ // ID of the workspace's private access settings object. Only used for
+ // PrivateLink. You must specify this ID if you are using [AWS PrivateLink]
+ // for either front-end (user-to-workspace connection), back-end (data plane
+ // to control plane connection), or both connection types.
+ //
+ // Before configuring PrivateLink, read the [Databricks article about
+ // PrivateLink].
+ //
+ // [AWS PrivateLink]: https://aws.amazon.com/privatelink/
+ // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
+ // ID of the workspace's storage configuration object.
+ StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
+ // ID of the key configuration for encrypting workspace storage.
+ StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
+ // A unique integer ID for the workspace.
+ WorkspaceId int64 `json:"workspace_id,omitempty"`
+ // The human-readable name of the workspace.
+ WorkspaceName string `json:"workspace_name,omitempty"`
+ // The status of the workspace. For workspace creation, usually it is set to
+ // `PROVISIONING` initially. Continue to check the status until the status
+ // is `RUNNING`.
+ WorkspaceStatus WorkspaceStatus `json:"workspace_status,omitempty"`
+ // Message describing the current workspace status.
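+ //
+ // Per the `workspace_status` note above, a hedged polling sketch (not from
+ // the source; the fetch step is elided and `ws` is a hypothetical
+ // *Workspace freshly re-read from the Accounts API on each iteration):
+ //
+ //	for ws.WorkspaceStatus != WorkspaceStatusRunning {
+ //		// re-fetch the workspace, sleep between attempts, and re-check
+ //	}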
+ WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Workspace) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Workspace) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The status of the workspace. For workspace creation, usually it is set to +// `PROVISIONING` initially. Continue to check the status until the status is +// `RUNNING`. +type WorkspaceStatus string + +const WorkspaceStatusBanned WorkspaceStatus = `BANNED` + +const WorkspaceStatusCancelling WorkspaceStatus = `CANCELLING` + +const WorkspaceStatusFailed WorkspaceStatus = `FAILED` + +const WorkspaceStatusNotProvisioned WorkspaceStatus = `NOT_PROVISIONED` + +const WorkspaceStatusProvisioning WorkspaceStatus = `PROVISIONING` + +const WorkspaceStatusRunning WorkspaceStatus = `RUNNING` + +// String representation for [fmt.Print] +func (f *WorkspaceStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspaceStatus) Set(v string) error { + switch v { + case `BANNED`, `CANCELLING`, `FAILED`, `NOT_PROVISIONED`, `PROVISIONING`, `RUNNING`: + *f = WorkspaceStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BANNED", "CANCELLING", "FAILED", "NOT_PROVISIONED", "PROVISIONING", "RUNNING"`, v) + } +} + +// Type always returns WorkspaceStatus to satisfy [pflag.Value] interface +func (f *WorkspaceStatus) Type() string { + return "WorkspaceStatus" +} diff --git a/serving/v2preview/api.go b/serving/v2preview/api.go new file mode 100755 index 000000000..cc9cc1f97 --- /dev/null +++ b/serving/v2preview/api.go @@ -0,0 +1,306 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Serving Endpoints, Serving Endpoints Data Plane Preview, Serving Endpoints Preview, etc. +package servingpreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type ServingEndpointsInterface interface { +} + +func NewServingEndpoints(client *client.DatabricksClient) *ServingEndpointsAPI { + return &ServingEndpointsAPI{ + servingEndpointsImpl: servingEndpointsImpl{ + client: client, + }, + } +} + +// The Serving Endpoints API allows you to create, update, and delete model +// serving endpoints. +// +// You can use a serving endpoint to serve models from the Databricks Model +// Registry or from Unity Catalog. Endpoints expose the underlying models as +// scalable REST API endpoints using serverless compute. This means the +// endpoints and associated compute resources are fully managed by Databricks +// and will not appear in your cloud account. A serving endpoint can consist of +// one or more MLflow models from the Databricks Model Registry, called served +// entities. A serving endpoint can have at most ten served entities. You can +// configure traffic settings to define how requests should be routed to your +// served entities behind an endpoint. Additionally, you can configure the scale +// of resources that should be applied to each served entity. +type ServingEndpointsAPI struct { + servingEndpointsImpl +} + +type ServingEndpointsDataPlanePreviewInterface interface { + + // Query a serving endpoint. 
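+ //
+ // A minimal usage sketch (not from the source; assumes a context ctx, a
+ // configured ServingEndpointsDataPlanePreviewClient named c, and a
+ // hypothetical endpoint name):
+ //
+ //	resp, err := c.Query(ctx, QueryEndpointInput{Name: "my-endpoint"})
+ //	if err != nil {
+ //		// handle the error from the data plane call
+ //	}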
+ Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error)
+}
+
+func NewServingEndpointsDataPlanePreview(client *client.DatabricksClient) *ServingEndpointsDataPlanePreviewAPI {
+ return &ServingEndpointsDataPlanePreviewAPI{
+ servingEndpointsDataPlanePreviewImpl: servingEndpointsDataPlanePreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// Serving endpoints DataPlane provides a set of operations to interact with
+// data plane endpoints for the Serving Endpoints service.
+type ServingEndpointsDataPlanePreviewAPI struct {
+ servingEndpointsDataPlanePreviewImpl
+}
+
+type ServingEndpointsPreviewInterface interface {
+
+ // Get build logs for a served model.
+ //
+ // Retrieves the build logs associated with the provided served model.
+ BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error)
+
+ // Get build logs for a served model.
+ //
+ // Retrieves the build logs associated with the provided served model.
+ BuildLogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*BuildLogsResponse, error)
+
+ // Create a new serving endpoint.
+ Create(ctx context.Context, request CreateServingEndpoint) (*ServingEndpointDetailed, error)
+
+ // Delete a serving endpoint.
+ Delete(ctx context.Context, request DeleteServingEndpointRequest) error
+
+ // Delete a serving endpoint.
+ DeleteByName(ctx context.Context, name string) error
+
+ // Get metrics of a serving endpoint.
+ //
+ // Retrieves the metrics associated with the provided serving endpoint in either
+ // Prometheus or OpenMetrics exposition format.
+ ExportMetrics(ctx context.Context, request ExportMetricsRequest) (*ExportMetricsResponse, error)
+
+ // Get metrics of a serving endpoint.
+ //
+ // Retrieves the metrics associated with the provided serving endpoint in either
+ // Prometheus or OpenMetrics exposition format.
+ ExportMetricsByName(ctx context.Context, name string) (*ExportMetricsResponse, error)
+
+ // Get a single serving endpoint.
+ //
+ // Retrieves the details for a single serving endpoint.
+ Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error)
+
+ // Get a single serving endpoint.
+ //
+ // Retrieves the details for a single serving endpoint.
+ GetByName(ctx context.Context, name string) (*ServingEndpointDetailed, error)
+
+ // Get the schema for a serving endpoint.
+ //
+ // Get the query schema of the serving endpoint in OpenAPI format. The schema
+ // contains information for the supported paths, input and output formats, and
+ // datatypes.
+ GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error)
+
+ // Get the schema for a serving endpoint.
+ //
+ // Get the query schema of the serving endpoint in OpenAPI format. The schema
+ // contains information for the supported paths, input and output formats, and
+ // datatypes.
+ GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error)
+
+ // Get serving endpoint permission levels.
+ //
+ // Gets the permission levels that a user can have on an object.
+ GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error)
+
+ // Get serving endpoint permission levels.
+ //
+ // Gets the permission levels that a user can have on an object.
+ GetPermissionLevelsByServingEndpointId(ctx context.Context, servingEndpointId string) (*GetServingEndpointPermissionLevelsResponse, error)
+
+ // Get serving endpoint permissions.
+ //
+ // Gets the permissions of a serving endpoint. Serving endpoints can inherit
+ // permissions from their root object.
+ GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error)
+
+ // Get serving endpoint permissions.
+ //
+ // Gets the permissions of a serving endpoint. Serving endpoints can inherit
+ // permissions from their root object.
+ GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error)
+
+ // Make a call to external services using the credentials stored in a UC
+ // Connection.
+ HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error)
+
+ // Get all serving endpoints.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ List(ctx context.Context) listing.Iterator[ServingEndpoint]
+
+ // Get all serving endpoints.
+ //
+ // This method is generated by Databricks SDK Code Generator.
+ ListAll(ctx context.Context) ([]ServingEndpoint, error)
+
+ // Get the latest logs for a served model.
+ //
+ // Retrieves the service logs associated with the provided served model.
+ Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error)
+
+ // Get the latest logs for a served model.
+ //
+ // Retrieves the service logs associated with the provided served model.
+ LogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*ServerLogsResponse, error)
+
+ // Update tags of a serving endpoint.
+ //
+ // Used to batch add and delete tags from a serving endpoint with a single API
+ // call.
+ Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error)
+
+ // Update rate limits of a serving endpoint.
+ //
+ // Used to update the rate limits of a serving endpoint. NOTE: Only foundation
+ // model endpoints are currently supported. For external models, use AI Gateway
+ // to manage rate limits.
+ Put(ctx context.Context, request PutRequest) (*PutResponse, error)
+
+ // Update AI Gateway of a serving endpoint.
+ //
+ // Used to update the AI Gateway of a serving endpoint. NOTE: Only external
+ // model and provisioned throughput endpoints are currently supported.
+ PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error)
+
+ // Query a serving endpoint.
+ Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error)
+
+ // Set serving endpoint permissions.
+ //
+ // Sets permissions on an object, replacing existing permissions if they exist.
+ // Deletes all direct permissions if none are specified. Objects can inherit
+ // permissions from their root object.
+ SetPermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error)
+
+ // Update config of a serving endpoint.
+ //
+ // Updates any combination of the serving endpoint's served entities, the
+ // compute configuration of those served entities, and the endpoint's traffic
+ // config. An endpoint that already has an update in progress cannot be updated
+ // until the current update completes or fails.
+ UpdateConfig(ctx context.Context, request EndpointCoreConfigInput) (*ServingEndpointDetailed, error)
+
+ // Update serving endpoint permissions.
+ //
+ // Updates the permissions on a serving endpoint. Serving endpoints can inherit
+ // permissions from their root object.
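+ //
+ // A hedged usage sketch (not from the source; assumes a preview client
+ // named c and a hypothetical endpoint ID; ServingEndpointId is the only
+ // field shown, and it matches this package's generated request type):
+ //
+ //	perms, err := c.UpdatePermissions(ctx, ServingEndpointPermissionsRequest{
+ //		ServingEndpointId: "abc123",
+ //	})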
+ UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error)
+}
+
+func NewServingEndpointsPreview(client *client.DatabricksClient) *ServingEndpointsPreviewAPI {
+ return &ServingEndpointsPreviewAPI{
+ servingEndpointsPreviewImpl: servingEndpointsPreviewImpl{
+ client: client,
+ },
+ }
+}
+
+// The Serving Endpoints API allows you to create, update, and delete model
+// serving endpoints.
+//
+// You can use a serving endpoint to serve models from the Databricks Model
+// Registry or from Unity Catalog. Endpoints expose the underlying models as
+// scalable REST API endpoints using serverless compute. This means the
+// endpoints and associated compute resources are fully managed by Databricks
+// and will not appear in your cloud account. A serving endpoint can consist of
+// one or more MLflow models from the Databricks Model Registry, called served
+// entities. A serving endpoint can have at most ten served entities. You can
+// configure traffic settings to define how requests should be routed to your
+// served entities behind an endpoint. Additionally, you can configure the scale
+// of resources that should be applied to each served entity.
+type ServingEndpointsPreviewAPI struct {
+ servingEndpointsPreviewImpl
+}
+
+// Get build logs for a served model.
+//
+// Retrieves the build logs associated with the provided served model.
+func (a *ServingEndpointsPreviewAPI) BuildLogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*BuildLogsResponse, error) {
+ return a.servingEndpointsPreviewImpl.BuildLogs(ctx, BuildLogsRequest{
+ Name: name,
+ ServedModelName: servedModelName,
+ })
+}
+
+// Delete a serving endpoint.
+func (a *ServingEndpointsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
+ return a.servingEndpointsPreviewImpl.Delete(ctx, DeleteServingEndpointRequest{
+ Name: name,
+ })
+}
+
+// Get metrics of a serving endpoint.
+//
+// Retrieves the metrics associated with the provided serving endpoint in either
+// Prometheus or OpenMetrics exposition format.
+func (a *ServingEndpointsPreviewAPI) ExportMetricsByName(ctx context.Context, name string) (*ExportMetricsResponse, error) {
+ return a.servingEndpointsPreviewImpl.ExportMetrics(ctx, ExportMetricsRequest{
+ Name: name,
+ })
+}
+
+// Get a single serving endpoint.
+//
+// Retrieves the details for a single serving endpoint.
+func (a *ServingEndpointsPreviewAPI) GetByName(ctx context.Context, name string) (*ServingEndpointDetailed, error) {
+ return a.servingEndpointsPreviewImpl.Get(ctx, GetServingEndpointRequest{
+ Name: name,
+ })
+}
+
+// Get the schema for a serving endpoint.
+//
+// Get the query schema of the serving endpoint in OpenAPI format. The schema
+// contains information for the supported paths, input and output formats, and
+// datatypes.
+func (a *ServingEndpointsPreviewAPI) GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error) {
+ return a.servingEndpointsPreviewImpl.GetOpenApi(ctx, GetOpenApiRequest{
+ Name: name,
+ })
+}
+
+// Get serving endpoint permission levels.
+//
+// Gets the permission levels that a user can have on an object.
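+//
+// A minimal sketch (not from the source; assumes a preview client named c, a
+// context ctx, and a hypothetical endpoint ID):
+//
+//	levels, err := c.GetPermissionLevelsByServingEndpointId(ctx, "abc123")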
+func (a *ServingEndpointsPreviewAPI) GetPermissionLevelsByServingEndpointId(ctx context.Context, servingEndpointId string) (*GetServingEndpointPermissionLevelsResponse, error) { + return a.servingEndpointsPreviewImpl.GetPermissionLevels(ctx, GetServingEndpointPermissionLevelsRequest{ + ServingEndpointId: servingEndpointId, + }) +} + +// Get serving endpoint permissions. +// +// Gets the permissions of a serving endpoint. Serving endpoints can inherit +// permissions from their root object. +func (a *ServingEndpointsPreviewAPI) GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error) { + return a.servingEndpointsPreviewImpl.GetPermissions(ctx, GetServingEndpointPermissionsRequest{ + ServingEndpointId: servingEndpointId, + }) +} + +// Get the latest logs for a served model. +// +// Retrieves the service logs associated with the provided served model. +func (a *ServingEndpointsPreviewAPI) LogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*ServerLogsResponse, error) { + return a.servingEndpointsPreviewImpl.Logs(ctx, LogsRequest{ + Name: name, + ServedModelName: servedModelName, + }) +} diff --git a/serving/v2preview/client.go b/serving/v2preview/client.go new file mode 100755 index 000000000..e81c0c789 --- /dev/null +++ b/serving/v2preview/client.go @@ -0,0 +1,113 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package servingpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type ServingEndpointsClient struct { + ServingEndpointsInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewServingEndpointsClient(cfg *config.Config) (*ServingEndpointsClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ServingEndpointsClient{ + Config: cfg, + apiClient: apiClient, + ServingEndpointsInterface: NewServingEndpoints(databricksClient), + }, nil +} + +type ServingEndpointsDataPlanePreviewClient struct { + ServingEndpointsDataPlanePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewServingEndpointsDataPlanePreviewClient(cfg *config.Config) (*ServingEndpointsDataPlanePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ServingEndpointsDataPlanePreviewClient{ + Config: cfg, + apiClient: apiClient, + ServingEndpointsDataPlanePreviewInterface: NewServingEndpointsDataPlanePreview(databricksClient), + }, nil +} + +type ServingEndpointsPreviewClient struct { + 
ServingEndpointsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewServingEndpointsPreviewClient(cfg *config.Config) (*ServingEndpointsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ServingEndpointsPreviewClient{ + Config: cfg, + apiClient: apiClient, + ServingEndpointsPreviewInterface: NewServingEndpointsPreview(databricksClient), + }, nil +} diff --git a/serving/v2preview/impl.go b/serving/v2preview/impl.go new file mode 100755 index 000000000..120e26f72 --- /dev/null +++ b/serving/v2preview/impl.go @@ -0,0 +1,253 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package servingpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just ServingEndpoints API methods +type servingEndpointsImpl struct { + client *client.DatabricksClient +} + +// unexported type that holds implementations of just ServingEndpointsDataPlanePreview API methods +type servingEndpointsDataPlanePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *servingEndpointsDataPlanePreviewImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { + var queryEndpointResponse QueryEndpointResponse + path := fmt.Sprintf("/api/preview//serving-endpoints/%v/invocations", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryEndpointResponse) + return &queryEndpointResponse, err +} + +// unexported type that holds implementations of just ServingEndpointsPreview API methods +type servingEndpointsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *servingEndpointsPreviewImpl) BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error) { + var buildLogsResponse BuildLogsResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/served-models/%v/build-logs", request.Name, request.ServedModelName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &buildLogsResponse) + return &buildLogsResponse, err +} + +func (a *servingEndpointsPreviewImpl) Create(ctx context.Context, request CreateServingEndpoint) (*ServingEndpointDetailed, error) { + var servingEndpointDetailed ServingEndpointDetailed + path := "/api/2.0preview/serving-endpoints" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &servingEndpointDetailed) + return 
&servingEndpointDetailed, err +} + +func (a *servingEndpointsPreviewImpl) Delete(ctx context.Context, request DeleteServingEndpointRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *servingEndpointsPreviewImpl) ExportMetrics(ctx context.Context, request ExportMetricsRequest) (*ExportMetricsResponse, error) { + var exportMetricsResponse ExportMetricsResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/metrics", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "text/plain" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &exportMetricsResponse) + return &exportMetricsResponse, err +} + +func (a *servingEndpointsPreviewImpl) Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error) { + var servingEndpointDetailed ServingEndpointDetailed + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servingEndpointDetailed) + return &servingEndpointDetailed, err +} + +func (a *servingEndpointsPreviewImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) { + var getOpenApiResponse GetOpenApiResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/openapi", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "text/plain" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getOpenApiResponse) + return &getOpenApiResponse, err +} + +func (a *servingEndpointsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { + var getServingEndpointPermissionLevelsResponse GetServingEndpointPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v/permissionLevels", request.ServingEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getServingEndpointPermissionLevelsResponse) + return &getServingEndpointPermissionLevelsResponse, err +} + +func (a *servingEndpointsPreviewImpl) GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { + var servingEndpointPermissions ServingEndpointPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servingEndpointPermissions) + return &servingEndpointPermissions, err +} + +func (a *servingEndpointsPreviewImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) { + var httpRequestResponse HttpRequestResponse + path := 
"/api/2.0preview/external-function" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "text/plain" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &httpRequestResponse) + return &httpRequestResponse, err +} + +// Get all serving endpoints. +func (a *servingEndpointsPreviewImpl) List(ctx context.Context) listing.Iterator[ServingEndpoint] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListEndpointsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListEndpointsResponse) []ServingEndpoint { + return resp.Endpoints + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all serving endpoints. +func (a *servingEndpointsPreviewImpl) ListAll(ctx context.Context) ([]ServingEndpoint, error) { + iterator := a.List(ctx) + return listing.ToSlice[ServingEndpoint](ctx, iterator) +} +func (a *servingEndpointsPreviewImpl) internalList(ctx context.Context) (*ListEndpointsResponse, error) { + var listEndpointsResponse ListEndpointsResponse + path := "/api/2.0preview/serving-endpoints" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listEndpointsResponse) + return &listEndpointsResponse, err +} + +func (a *servingEndpointsPreviewImpl) Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error) { + var serverLogsResponse ServerLogsResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/served-models/%v/logs", request.Name, request.ServedModelName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &serverLogsResponse) + return &serverLogsResponse, err +} + +func (a *servingEndpointsPreviewImpl) Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) { + var endpointTags EndpointTags + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/tags", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &endpointTags) + return &endpointTags, err +} + +func (a *servingEndpointsPreviewImpl) Put(ctx context.Context, request PutRequest) (*PutResponse, error) { + var putResponse PutResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/rate-limits", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &putResponse) + return &putResponse, err +} + +func (a *servingEndpointsPreviewImpl) PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) { + var putAiGatewayResponse PutAiGatewayResponse + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/ai-gateway", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, 
http.MethodPut, path, headers, queryParams, request, &putAiGatewayResponse) + return &putAiGatewayResponse, err +} + +func (a *servingEndpointsPreviewImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { + var queryEndpointResponse QueryEndpointResponse + path := fmt.Sprintf("/api/preview//serving-endpoints/%v/invocations", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryEndpointResponse) + return &queryEndpointResponse, err +} + +func (a *servingEndpointsPreviewImpl) SetPermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { + var servingEndpointPermissions ServingEndpointPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &servingEndpointPermissions) + return &servingEndpointPermissions, err +} + +func (a *servingEndpointsPreviewImpl) UpdateConfig(ctx context.Context, request EndpointCoreConfigInput) (*ServingEndpointDetailed, error) { + var servingEndpointDetailed ServingEndpointDetailed + path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/config", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &servingEndpointDetailed) + return &servingEndpointDetailed, err +} + +func (a *servingEndpointsPreviewImpl) UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { + var servingEndpointPermissions ServingEndpointPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &servingEndpointPermissions) + return &servingEndpointPermissions, err +} diff --git a/serving/v2preview/model.go b/serving/v2preview/model.go new file mode 100755 index 000000000..ec28dcb4a --- /dev/null +++ b/serving/v2preview/model.go @@ -0,0 +1,2153 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package servingpreview + +import ( + "fmt" + "io" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type Ai21LabsConfig struct { + // The Databricks secret key reference for an AI21 Labs API key. If you + // prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. + // You must provide an API key using one of the following fields: + // `ai21labs_api_key` or `ai21labs_api_key_plaintext`. + Ai21labsApiKey string `json:"ai21labs_api_key,omitempty"` + // An AI21 Labs API key provided as a plaintext string. If you prefer to + // reference your key using Databricks Secrets, see `ai21labs_api_key`. 
You
+ // must provide an API key using one of the following fields:
+ // `ai21labs_api_key` or `ai21labs_api_key_plaintext`.
+ Ai21labsApiKeyPlaintext string `json:"ai21labs_api_key_plaintext,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Ai21LabsConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Ai21LabsConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type AiGatewayConfig struct {
+ // Configuration for AI Guardrails to prevent unwanted and unsafe data in
+ // requests and responses.
+ Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"`
+ // Configuration for payload logging using inference tables. Use these
+ // tables to monitor and audit data being sent to and received from model
+ // APIs and to improve model quality.
+ InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"`
+ // Configuration for rate limits which can be set to limit endpoint traffic.
+ RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"`
+ // Configuration to enable usage tracking using system tables. These tables
+ // allow you to monitor operational usage on endpoints and their associated
+ // costs.
+ UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"`
+}
+
+type AiGatewayGuardrailParameters struct {
+ // List of invalid keywords. AI guardrail uses keyword or string matching to
+ // decide if the keyword exists in the request or response content.
+ InvalidKeywords []string `json:"invalid_keywords,omitempty"`
+ // Configuration for guardrail PII filter.
+ Pii *AiGatewayGuardrailPiiBehavior `json:"pii,omitempty"`
+ // Indicates whether the safety filter is enabled.
+ Safety bool `json:"safety,omitempty"`
+ // The list of allowed topics. Given a chat request, this guardrail flags
+ // the request if its topic is not in the allowed topics.
+ ValidTopics []string `json:"valid_topics,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AiGatewayGuardrailParameters) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s AiGatewayGuardrailParameters) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type AiGatewayGuardrailPiiBehavior struct {
+ // The behavior of the PII filter. One of `BLOCK` or `NONE`.
+ Behavior AiGatewayGuardrailPiiBehaviorBehavior `json:"behavior,omitempty"`
+}
+
+type AiGatewayGuardrailPiiBehaviorBehavior string
+
+const AiGatewayGuardrailPiiBehaviorBehaviorBlock AiGatewayGuardrailPiiBehaviorBehavior = `BLOCK`
+
+const AiGatewayGuardrailPiiBehaviorBehaviorNone AiGatewayGuardrailPiiBehaviorBehavior = `NONE`
+
+// String representation for [fmt.Print]
+func (f *AiGatewayGuardrailPiiBehaviorBehavior) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AiGatewayGuardrailPiiBehaviorBehavior) Set(v string) error {
+ switch v {
+ case `BLOCK`, `NONE`:
+ *f = AiGatewayGuardrailPiiBehaviorBehavior(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "BLOCK", "NONE"`, v)
+ }
+}
+
+// Type always returns AiGatewayGuardrailPiiBehaviorBehavior to satisfy [pflag.Value] interface
+func (f *AiGatewayGuardrailPiiBehaviorBehavior) Type() string {
+ return "AiGatewayGuardrailPiiBehaviorBehavior"
+}
+
+type AiGatewayGuardrails struct {
+ // Configuration for input guardrail filters.
+ Input *AiGatewayGuardrailParameters `json:"input,omitempty"`
+ // Configuration for output guardrail filters.
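+ //
+ // A hedged construction sketch for the enclosing AiGatewayGuardrails type
+ // (not from the source; the keyword list is a hypothetical placeholder):
+ //
+ //	g := AiGatewayGuardrails{
+ //		Input: &AiGatewayGuardrailParameters{
+ //			InvalidKeywords: []string{"internal-codename"},
+ //			Pii: &AiGatewayGuardrailPiiBehavior{
+ //				Behavior: AiGatewayGuardrailPiiBehaviorBehaviorBlock,
+ //			},
+ //		},
+ //	}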
+ Output *AiGatewayGuardrailParameters `json:"output,omitempty"`
+}
+
+type AiGatewayInferenceTableConfig struct {
+ // The name of the catalog in Unity Catalog. Required when enabling
+ // inference tables. NOTE: On update, you have to disable the inference
+ // table first in order to change the catalog name.
+ CatalogName string `json:"catalog_name,omitempty"`
+ // Indicates whether the inference table is enabled.
+ Enabled bool `json:"enabled,omitempty"`
+ // The name of the schema in Unity Catalog. Required when enabling inference
+ // tables. NOTE: On update, you have to disable the inference table first in
+ // order to change the schema name.
+ SchemaName string `json:"schema_name,omitempty"`
+ // The prefix of the table in Unity Catalog. NOTE: On update, you have to
+ // disable the inference table first in order to change the prefix name.
+ TableNamePrefix string `json:"table_name_prefix,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *AiGatewayInferenceTableConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s AiGatewayInferenceTableConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type AiGatewayRateLimit struct {
+ // Used to specify how many calls are allowed for a key within the
+ // renewal_period.
+ Calls int64 `json:"calls"`
+ // Key field for a rate limit. Currently, only 'user' and 'endpoint' are
+ // supported, with 'endpoint' being the default if not specified.
+ Key AiGatewayRateLimitKey `json:"key,omitempty"`
+ // Renewal period field for a rate limit. Currently, only 'minute' is
+ // supported.
+ RenewalPeriod AiGatewayRateLimitRenewalPeriod `json:"renewal_period"`
+}
+
+type AiGatewayRateLimitKey string
+
+const AiGatewayRateLimitKeyEndpoint AiGatewayRateLimitKey = `endpoint`
+
+const AiGatewayRateLimitKeyUser AiGatewayRateLimitKey = `user`
+
+// String representation for [fmt.Print]
+func (f *AiGatewayRateLimitKey) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AiGatewayRateLimitKey) Set(v string) error {
+ switch v {
+ case `endpoint`, `user`:
+ *f = AiGatewayRateLimitKey(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "endpoint", "user"`, v)
+ }
+}
+
+// Type always returns AiGatewayRateLimitKey to satisfy [pflag.Value] interface
+func (f *AiGatewayRateLimitKey) Type() string {
+ return "AiGatewayRateLimitKey"
+}
+
+type AiGatewayRateLimitRenewalPeriod string
+
+const AiGatewayRateLimitRenewalPeriodMinute AiGatewayRateLimitRenewalPeriod = `minute`
+
+// String representation for [fmt.Print]
+func (f *AiGatewayRateLimitRenewalPeriod) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AiGatewayRateLimitRenewalPeriod) Set(v string) error {
+ switch v {
+ case `minute`:
+ *f = AiGatewayRateLimitRenewalPeriod(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "minute"`, v)
+ }
+}
+
+// Type always returns AiGatewayRateLimitRenewalPeriod to satisfy [pflag.Value] interface
+func (f *AiGatewayRateLimitRenewalPeriod) Type() string {
+ return "AiGatewayRateLimitRenewalPeriod"
+}
+
+type AiGatewayUsageTrackingConfig struct {
+ // Whether to enable usage tracking.
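+ //
+ // A hedged sketch tying the AI Gateway types above together (not from the
+ // source; the values are hypothetical and only types defined in this file
+ // are used):
+ //
+ //	cfg := AiGatewayConfig{
+ //		UsageTrackingConfig: &AiGatewayUsageTrackingConfig{Enabled: true},
+ //		RateLimits: []AiGatewayRateLimit{{
+ //			Calls:         100,
+ //			Key:           AiGatewayRateLimitKeyUser,
+ //			RenewalPeriod: AiGatewayRateLimitRenewalPeriodMinute,
+ //		}},
+ //	}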
+ Enabled bool `json:"enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AiGatewayUsageTrackingConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AiGatewayUsageTrackingConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AmazonBedrockConfig struct { + // The Databricks secret key reference for an AWS access key ID with + // permissions to interact with Bedrock services. If you prefer to paste + // your API key directly, see `aws_access_key_id_plaintext`. You must + // provide an API key using one of the following fields: `aws_access_key_id` + // or `aws_access_key_id_plaintext`. + AwsAccessKeyId string `json:"aws_access_key_id,omitempty"` + // An AWS access key ID with permissions to interact with Bedrock services + // provided as a plaintext string. If you prefer to reference your key using + // Databricks Secrets, see `aws_access_key_id`. You must provide an API key + // using one of the following fields: `aws_access_key_id` or + // `aws_access_key_id_plaintext`. + AwsAccessKeyIdPlaintext string `json:"aws_access_key_id_plaintext,omitempty"` + // The AWS region to use. Bedrock has to be enabled there. + AwsRegion string `json:"aws_region"` + // The Databricks secret key reference for an AWS secret access key paired + // with the access key ID, with permissions to interact with Bedrock + // services. If you prefer to paste your API key directly, see + // `aws_secret_access_key_plaintext`. You must provide an API key using one + // of the following fields: `aws_secret_access_key` or + // `aws_secret_access_key_plaintext`. + AwsSecretAccessKey string `json:"aws_secret_access_key,omitempty"` + // An AWS secret access key paired with the access key ID, with permissions + // to interact with Bedrock services provided as a plaintext string. If you + // prefer to reference your key using Databricks Secrets, see + // `aws_secret_access_key`. You must provide an API key using one of the + // following fields: `aws_secret_access_key` or + // `aws_secret_access_key_plaintext`. + AwsSecretAccessKeyPlaintext string `json:"aws_secret_access_key_plaintext,omitempty"` + // The underlying provider in Amazon Bedrock. Supported values (case + // insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. 
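+ //
+ // A hedged sketch of this config (not from the source; the region and the
+ // secret reference are hypothetical placeholders):
+ //
+ //	c := AmazonBedrockConfig{
+ //		AwsRegion:       "us-east-1",
+ //		AwsAccessKeyId:  "{{secrets/my_scope/aws_access_key_id}}",
+ //		BedrockProvider: AmazonBedrockConfigBedrockProviderAnthropic,
+ //	}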
+ BedrockProvider AmazonBedrockConfigBedrockProvider `json:"bedrock_provider"` + + ForceSendFields []string `json:"-"` +} + +func (s *AmazonBedrockConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AmazonBedrockConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AmazonBedrockConfigBedrockProvider string + +const AmazonBedrockConfigBedrockProviderAi21labs AmazonBedrockConfigBedrockProvider = `ai21labs` + +const AmazonBedrockConfigBedrockProviderAmazon AmazonBedrockConfigBedrockProvider = `amazon` + +const AmazonBedrockConfigBedrockProviderAnthropic AmazonBedrockConfigBedrockProvider = `anthropic` + +const AmazonBedrockConfigBedrockProviderCohere AmazonBedrockConfigBedrockProvider = `cohere` + +// String representation for [fmt.Print] +func (f *AmazonBedrockConfigBedrockProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AmazonBedrockConfigBedrockProvider) Set(v string) error { + switch v { + case `ai21labs`, `amazon`, `anthropic`, `cohere`: + *f = AmazonBedrockConfigBedrockProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon", "anthropic", "cohere"`, v) + } +} + +// Type always returns AmazonBedrockConfigBedrockProvider to satisfy [pflag.Value] interface +func (f *AmazonBedrockConfigBedrockProvider) Type() string { + return "AmazonBedrockConfigBedrockProvider" +} + +type AnthropicConfig struct { + // The Databricks secret key reference for an Anthropic API key. If you + // prefer to paste your API key directly, see `anthropic_api_key_plaintext`. + // You must provide an API key using one of the following fields: + // `anthropic_api_key` or `anthropic_api_key_plaintext`. + AnthropicApiKey string `json:"anthropic_api_key,omitempty"` + // The Anthropic API key provided as a plaintext string. If you prefer to + // reference your key using Databricks Secrets, see `anthropic_api_key`. You + // must provide an API key using one of the following fields: + // `anthropic_api_key` or `anthropic_api_key_plaintext`. + AnthropicApiKeyPlaintext string `json:"anthropic_api_key_plaintext,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AnthropicConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AnthropicConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AutoCaptureConfigInput struct { + // The name of the catalog in Unity Catalog. NOTE: On update, you cannot + // change the catalog name if the inference table is already enabled. + CatalogName string `json:"catalog_name,omitempty"` + // Indicates whether the inference table is enabled. + Enabled bool `json:"enabled,omitempty"` + // The name of the schema in Unity Catalog. NOTE: On update, you cannot + // change the schema name if the inference table is already enabled. + SchemaName string `json:"schema_name,omitempty"` + // The prefix of the table in Unity Catalog. NOTE: On update, you cannot + // change the prefix name if the inference table is already enabled. + TableNamePrefix string `json:"table_name_prefix,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AutoCaptureConfigInput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AutoCaptureConfigInput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AutoCaptureConfigOutput struct { + // The name of the catalog in Unity Catalog. 
NOTE: On update, you cannot + // change the catalog name if the inference table is already enabled. + CatalogName string `json:"catalog_name,omitempty"` + // Indicates whether the inference table is enabled. + Enabled bool `json:"enabled,omitempty"` + // The name of the schema in Unity Catalog. NOTE: On update, you cannot + // change the schema name if the inference table is already enabled. + SchemaName string `json:"schema_name,omitempty"` + + State *AutoCaptureState `json:"state,omitempty"` + // The prefix of the table in Unity Catalog. NOTE: On update, you cannot + // change the prefix name if the inference table is already enabled. + TableNamePrefix string `json:"table_name_prefix,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AutoCaptureConfigOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AutoCaptureConfigOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AutoCaptureState struct { + PayloadTable *PayloadTable `json:"payload_table,omitempty"` +} + +// Get build logs for a served model +type BuildLogsRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name string `json:"-" url:"-"` + // The name of the served model that build logs will be retrieved for. This + // field is required. + ServedModelName string `json:"-" url:"-"` +} + +type BuildLogsResponse struct { + // The logs associated with building the served entity's environment. + Logs string `json:"logs"` +} + +type ChatMessage struct { + // The content of the message. + Content string `json:"content,omitempty"` + // The role of the message. One of [system, user, assistant]. + Role ChatMessageRole `json:"role,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ChatMessage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ChatMessage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The role of the message. One of [system, user, assistant]. +type ChatMessageRole string + +const ChatMessageRoleAssistant ChatMessageRole = `assistant` + +const ChatMessageRoleSystem ChatMessageRole = `system` + +const ChatMessageRoleUser ChatMessageRole = `user` + +// String representation for [fmt.Print] +func (f *ChatMessageRole) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ChatMessageRole) Set(v string) error { + switch v { + case `assistant`, `system`, `user`: + *f = ChatMessageRole(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "assistant", "system", "user"`, v) + } +} + +// Type always returns ChatMessageRole to satisfy [pflag.Value] interface +func (f *ChatMessageRole) Type() string { + return "ChatMessageRole" +} + +type CohereConfig struct { + // This is an optional field to provide a customized base URL for the Cohere + // API. If left unspecified, the standard Cohere base URL is used. + CohereApiBase string `json:"cohere_api_base,omitempty"` + // The Databricks secret key reference for a Cohere API key. If you prefer + // to paste your API key directly, see `cohere_api_key_plaintext`. You must + // provide an API key using one of the following fields: `cohere_api_key` or + // `cohere_api_key_plaintext`. + CohereApiKey string `json:"cohere_api_key,omitempty"` + // The Cohere API key provided as a plaintext string. If you prefer to + // reference your key using Databricks Secrets, see `cohere_api_key`. 
You + // must provide an API key using one of the following fields: + // `cohere_api_key` or `cohere_api_key_plaintext`. + CohereApiKeyPlaintext string `json:"cohere_api_key_plaintext,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CohereConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CohereConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model and provisioned throughput endpoints are currently + // supported. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The core config of the serving endpoint. + Config *EndpointCoreConfigInput `json:"config,omitempty"` + // The name of the serving endpoint. This field is required and must be + // unique across a Databricks workspace. An endpoint name can consist of + // alphanumeric characters, dashes, and underscores. + Name string `json:"name"` + // Rate limits to be applied to the serving endpoint. NOTE: this field is + // deprecated, please use AI Gateway to manage rate limits. + RateLimits []RateLimit `json:"rate_limits,omitempty"` + // Enable route optimization for the serving endpoint. + RouteOptimized bool `json:"route_optimized,omitempty"` + // Tags to be attached to the serving endpoint and automatically propagated + // to billing logs. + Tags []EndpointTag `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateServingEndpoint) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateServingEndpoint) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Details necessary to query this object's API through the DataPlane APIs. +type DataPlaneInfo struct { + // Authorization details as a string. + AuthorizationDetails string `json:"authorization_details,omitempty"` + // The URL of the endpoint for this operation in the dataplane. + EndpointUrl string `json:"endpoint_url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DataPlaneInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DataPlaneInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DatabricksModelServingConfig struct { + // The Databricks secret key reference for a Databricks API token that + // corresponds to a user or service principal with Can Query access to the + // model serving endpoint pointed to by this external model. If you prefer + // to paste your API key directly, see `databricks_api_token_plaintext`. You + // must provide an API key using one of the following fields: + // `databricks_api_token` or `databricks_api_token_plaintext`. + DatabricksApiToken string `json:"databricks_api_token,omitempty"` + // The Databricks API token that corresponds to a user or service principal + // with Can Query access to the model serving endpoint pointed to by this + // external model provided as a plaintext string. If you prefer to reference + // your key using Databricks Secrets, see `databricks_api_token`. You must + // provide an API key using one of the following fields: + // `databricks_api_token` or `databricks_api_token_plaintext`. + DatabricksApiTokenPlaintext string `json:"databricks_api_token_plaintext,omitempty"` + // The URL of the Databricks workspace containing the model serving endpoint + // pointed to by this external model. 
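+ // A minimal sketch (the workspace URL and secret path are placeholders):
+ //
+ //	cfg := DatabricksModelServingConfig{
+ //		DatabricksApiToken:     "{{secrets/my_scope/pat}}",
+ //		DatabricksWorkspaceUrl: "https://my-workspace.cloud.databricks.com",
+ //	}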
+ DatabricksWorkspaceUrl string `json:"databricks_workspace_url"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *DatabricksModelServingConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s DatabricksModelServingConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type DataframeSplitInput struct {
+ Columns []any `json:"columns,omitempty"`
+
+ Data []any `json:"data,omitempty"`
+
+ Index []int `json:"index,omitempty"`
+}
+
+type DeleteResponse struct {
+}
+
+// Delete a serving endpoint
+type DeleteServingEndpointRequest struct {
+ Name string `json:"-" url:"-"`
+}
+
+type EmbeddingsV1ResponseEmbeddingElement struct {
+ Embedding []float64 `json:"embedding,omitempty"`
+ // The index of the embedding in the response.
+ Index int `json:"index,omitempty"`
+ // This will always be 'embedding'.
+ Object EmbeddingsV1ResponseEmbeddingElementObject `json:"object,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *EmbeddingsV1ResponseEmbeddingElement) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s EmbeddingsV1ResponseEmbeddingElement) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// This will always be 'embedding'.
+type EmbeddingsV1ResponseEmbeddingElementObject string
+
+const EmbeddingsV1ResponseEmbeddingElementObjectEmbedding EmbeddingsV1ResponseEmbeddingElementObject = `embedding`
+
+// String representation for [fmt.Print]
+func (f *EmbeddingsV1ResponseEmbeddingElementObject) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *EmbeddingsV1ResponseEmbeddingElementObject) Set(v string) error {
+ switch v {
+ case `embedding`:
+ *f = EmbeddingsV1ResponseEmbeddingElementObject(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "embedding"`, v)
+ }
+}
+
+// Type always returns EmbeddingsV1ResponseEmbeddingElementObject to satisfy [pflag.Value] interface
+func (f *EmbeddingsV1ResponseEmbeddingElementObject) Type() string {
+ return "EmbeddingsV1ResponseEmbeddingElementObject"
+}
+
+type EndpointCoreConfigInput struct {
+ // Configuration for Inference Tables which automatically logs requests and
+ // responses to Unity Catalog. Note: this field is deprecated for creating
+ // new provisioned throughput endpoints, or updating existing provisioned
+ // throughput endpoints that have never had an inference table configured;
+ // in these cases, please use AI Gateway to manage inference tables.
+ AutoCaptureConfig *AutoCaptureConfigInput `json:"auto_capture_config,omitempty"`
+ // The name of the serving endpoint to update. This field is required.
+ Name string `json:"-" url:"-"`
+ // The list of served entities under the serving endpoint config.
+ ServedEntities []ServedEntityInput `json:"served_entities,omitempty"`
+ // (Deprecated, use served_entities instead) The list of served models under
+ // the serving endpoint config.
+ ServedModels []ServedModelInput `json:"served_models,omitempty"`
+ // The traffic configuration associated with the serving endpoint config.
+ TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"`
+}
+
+type EndpointCoreConfigOutput struct {
+ // Configuration for Inference Tables which automatically logs requests and
+ // responses to Unity Catalog. Note: this field is deprecated for creating
+ // new provisioned throughput endpoints, or updating existing provisioned
+ // throughput endpoints that have never had an inference table configured;
+ // in these cases, please use AI Gateway to manage inference tables.
+ AutoCaptureConfig *AutoCaptureConfigOutput `json:"auto_capture_config,omitempty"`
+ // The config version that the serving endpoint is currently serving.
+ ConfigVersion int64 `json:"config_version,omitempty"`
+ // The list of served entities under the serving endpoint config.
+ ServedEntities []ServedEntityOutput `json:"served_entities,omitempty"`
+ // (Deprecated, use served_entities instead) The list of served models under
+ // the serving endpoint config.
+ ServedModels []ServedModelOutput `json:"served_models,omitempty"`
+ // The traffic configuration associated with the serving endpoint config.
+ TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *EndpointCoreConfigOutput) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s EndpointCoreConfigOutput) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type EndpointCoreConfigSummary struct {
+ // The list of served entities under the serving endpoint config.
+ ServedEntities []ServedEntitySpec `json:"served_entities,omitempty"`
+ // (Deprecated, use served_entities instead) The list of served models under
+ // the serving endpoint config.
+ ServedModels []ServedModelSpec `json:"served_models,omitempty"`
+}
+
+type EndpointPendingConfig struct {
+ // Configuration for Inference Tables which automatically logs requests and
+ // responses to Unity Catalog. Note: this field is deprecated for creating
+ // new provisioned throughput endpoints, or updating existing provisioned
+ // throughput endpoints that have never had an inference table configured;
+ // in these cases, please use AI Gateway to manage inference tables.
+ AutoCaptureConfig *AutoCaptureConfigOutput `json:"auto_capture_config,omitempty"`
+ // The config version that the serving endpoint is currently serving.
+ ConfigVersion int `json:"config_version,omitempty"`
+ // The list of served entities belonging to the last issued update to the
+ // serving endpoint.
+ ServedEntities []ServedEntityOutput `json:"served_entities,omitempty"`
+ // (Deprecated, use served_entities instead) The list of served models
+ // belonging to the last issued update to the serving endpoint.
+ ServedModels []ServedModelOutput `json:"served_models,omitempty"`
+ // The timestamp when the update to the pending config started.
+ StartTime int64 `json:"start_time,omitempty"`
+ // The traffic config defining how invocations to the serving endpoint
+ // should be routed.
+ TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *EndpointPendingConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s EndpointPendingConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type EndpointState struct {
+ // The state of an endpoint's config update. This informs the user if the
+ // pending_config is in progress, if the update failed, or if there is no
+ // update in progress. Note that if the endpoint's config_update state value
+ // is IN_PROGRESS, another update cannot be made until the update completes
+ // or fails.
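+ // An illustrative guard (the `state` variable is hypothetical):
+ //
+ //	if state.ConfigUpdate == EndpointStateConfigUpdateInProgress {
+ //		// an update is already in flight; wait for it to complete or fail
+ //	}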
+ ConfigUpdate EndpointStateConfigUpdate `json:"config_update,omitempty"` + // The state of an endpoint, indicating whether or not the endpoint is + // queryable. An endpoint is READY if all of the served entities in its + // active configuration are ready. If any of the actively served entities + // are in a non-ready state, the endpoint state will be NOT_READY. + Ready EndpointStateReady `json:"ready,omitempty"` +} + +type EndpointStateConfigUpdate string + +const EndpointStateConfigUpdateInProgress EndpointStateConfigUpdate = `IN_PROGRESS` + +const EndpointStateConfigUpdateNotUpdating EndpointStateConfigUpdate = `NOT_UPDATING` + +const EndpointStateConfigUpdateUpdateCanceled EndpointStateConfigUpdate = `UPDATE_CANCELED` + +const EndpointStateConfigUpdateUpdateFailed EndpointStateConfigUpdate = `UPDATE_FAILED` + +// String representation for [fmt.Print] +func (f *EndpointStateConfigUpdate) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStateConfigUpdate) Set(v string) error { + switch v { + case `IN_PROGRESS`, `NOT_UPDATING`, `UPDATE_CANCELED`, `UPDATE_FAILED`: + *f = EndpointStateConfigUpdate(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN_PROGRESS", "NOT_UPDATING", "UPDATE_CANCELED", "UPDATE_FAILED"`, v) + } +} + +// Type always returns EndpointStateConfigUpdate to satisfy [pflag.Value] interface +func (f *EndpointStateConfigUpdate) Type() string { + return "EndpointStateConfigUpdate" +} + +type EndpointStateReady string + +const EndpointStateReadyNotReady EndpointStateReady = `NOT_READY` + +const EndpointStateReadyReady EndpointStateReady = `READY` + +// String representation for [fmt.Print] +func (f *EndpointStateReady) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStateReady) Set(v string) error { + switch v { + case `NOT_READY`, `READY`: + *f = EndpointStateReady(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NOT_READY", "READY"`, v) + } +} + +// Type always returns EndpointStateReady to satisfy [pflag.Value] interface +func (f *EndpointStateReady) Type() string { + return "EndpointStateReady" +} + +type EndpointTag struct { + // Key field for a serving endpoint tag. + Key string `json:"key"` + // Optional value field for a serving endpoint tag. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EndpointTags struct { + Tags []EndpointTag `json:"tags,omitempty"` +} + +// Get metrics of a serving endpoint +type ExportMetricsRequest struct { + // The name of the serving endpoint to retrieve metrics for. This field is + // required. + Name string `json:"-" url:"-"` +} + +type ExportMetricsResponse struct { + Contents io.ReadCloser `json:"-"` +} + +// Simple Proto message for testing +type ExternalFunctionRequest struct { + // The connection name to use. This is required to identify the external + // connection. + ConnectionName string `json:"connection_name"` + // Additional headers for the request. If not provided, only auth headers + // from connections would be passed. + Headers string `json:"headers,omitempty"` + // The JSON payload to send in the request body. + Json string `json:"json,omitempty"` + // The HTTP method to use (e.g., 'GET', 'POST'). 
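+ // For example (the connection name and path below are placeholders):
+ //
+ //	req := ExternalFunctionRequest{
+ //		ConnectionName: "my_connection",
+ //		Method:         ExternalFunctionRequestHttpMethodGet,
+ //		Path:           "/api/v1/resource",
+ //	}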
+ Method ExternalFunctionRequestHttpMethod `json:"method"` + // Query parameters for the request. + Params string `json:"params,omitempty"` + // The relative path for the API endpoint. This is required. + Path string `json:"path"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalFunctionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalFunctionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExternalFunctionRequestHttpMethod string + +const ExternalFunctionRequestHttpMethodDelete ExternalFunctionRequestHttpMethod = `DELETE` + +const ExternalFunctionRequestHttpMethodGet ExternalFunctionRequestHttpMethod = `GET` + +const ExternalFunctionRequestHttpMethodPatch ExternalFunctionRequestHttpMethod = `PATCH` + +const ExternalFunctionRequestHttpMethodPost ExternalFunctionRequestHttpMethod = `POST` + +const ExternalFunctionRequestHttpMethodPut ExternalFunctionRequestHttpMethod = `PUT` + +// String representation for [fmt.Print] +func (f *ExternalFunctionRequestHttpMethod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExternalFunctionRequestHttpMethod) Set(v string) error { + switch v { + case `DELETE`, `GET`, `PATCH`, `POST`, `PUT`: + *f = ExternalFunctionRequestHttpMethod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETE", "GET", "PATCH", "POST", "PUT"`, v) + } +} + +// Type always returns ExternalFunctionRequestHttpMethod to satisfy [pflag.Value] interface +func (f *ExternalFunctionRequestHttpMethod) Type() string { + return "ExternalFunctionRequestHttpMethod" +} + +type ExternalModel struct { + // AI21Labs Config. Only required if the provider is 'ai21labs'. + Ai21labsConfig *Ai21LabsConfig `json:"ai21labs_config,omitempty"` + // Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. + AmazonBedrockConfig *AmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` + // Anthropic Config. Only required if the provider is 'anthropic'. + AnthropicConfig *AnthropicConfig `json:"anthropic_config,omitempty"` + // Cohere Config. Only required if the provider is 'cohere'. + CohereConfig *CohereConfig `json:"cohere_config,omitempty"` + // Databricks Model Serving Config. Only required if the provider is + // 'databricks-model-serving'. + DatabricksModelServingConfig *DatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` + // Google Cloud Vertex AI Config. Only required if the provider is + // 'google-cloud-vertex-ai'. + GoogleCloudVertexAiConfig *GoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` + // The name of the external model. + Name string `json:"name"` + // OpenAI Config. Only required if the provider is 'openai'. + OpenaiConfig *OpenAiConfig `json:"openai_config,omitempty"` + // PaLM Config. Only required if the provider is 'palm'. + PalmConfig *PaLmConfig `json:"palm_config,omitempty"` + // The name of the provider for the external model. Currently, the supported + // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', + // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', 'palm', + // and 'custom'. + Provider ExternalModelProvider `json:"provider"` + // The task type of the external model. 
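+ // As a sketch, an Anthropic-backed external model might be declared as
+ // follows (the name, task string, and secret path are assumptions):
+ //
+ //	em := ExternalModel{
+ //		Name:     "claude",
+ //		Provider: ExternalModelProviderAnthropic,
+ //		Task:     "llm/v1/chat",
+ //		AnthropicConfig: &AnthropicConfig{
+ //			AnthropicApiKey: "{{secrets/my_scope/anthropic_key}}",
+ //		},
+ //	}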
+ Task string `json:"task"` +} + +type ExternalModelProvider string + +const ExternalModelProviderAi21labs ExternalModelProvider = `ai21labs` + +const ExternalModelProviderAmazonBedrock ExternalModelProvider = `amazon-bedrock` + +const ExternalModelProviderAnthropic ExternalModelProvider = `anthropic` + +const ExternalModelProviderCohere ExternalModelProvider = `cohere` + +const ExternalModelProviderDatabricksModelServing ExternalModelProvider = `databricks-model-serving` + +const ExternalModelProviderGoogleCloudVertexAi ExternalModelProvider = `google-cloud-vertex-ai` + +const ExternalModelProviderOpenai ExternalModelProvider = `openai` + +const ExternalModelProviderPalm ExternalModelProvider = `palm` + +// String representation for [fmt.Print] +func (f *ExternalModelProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExternalModelProvider) Set(v string) error { + switch v { + case `ai21labs`, `amazon-bedrock`, `anthropic`, `cohere`, `databricks-model-serving`, `google-cloud-vertex-ai`, `openai`, `palm`: + *f = ExternalModelProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon-bedrock", "anthropic", "cohere", "databricks-model-serving", "google-cloud-vertex-ai", "openai", "palm"`, v) + } +} + +// Type always returns ExternalModelProvider to satisfy [pflag.Value] interface +func (f *ExternalModelProvider) Type() string { + return "ExternalModelProvider" +} + +type ExternalModelUsageElement struct { + // The number of tokens in the chat/completions response. + CompletionTokens int `json:"completion_tokens,omitempty"` + // The number of tokens in the prompt. + PromptTokens int `json:"prompt_tokens,omitempty"` + // The total number of tokens in the prompt and response. + TotalTokens int `json:"total_tokens,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalModelUsageElement) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalModelUsageElement) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// All fields are not sensitive as they are hard-coded in the system and made +// available to customers. +type FoundationModel struct { + Description string `json:"description,omitempty"` + + DisplayName string `json:"display_name,omitempty"` + + Docs string `json:"docs,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *FoundationModel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FoundationModel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the schema for a serving endpoint +type GetOpenApiRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name string `json:"-" url:"-"` +} + +type GetOpenApiResponse struct { + Contents io.ReadCloser `json:"-"` +} + +// Get serving endpoint permission levels +type GetServingEndpointPermissionLevelsRequest struct { + // The serving endpoint for which to get or manage permissions. + ServingEndpointId string `json:"-" url:"-"` +} + +type GetServingEndpointPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []ServingEndpointPermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get serving endpoint permissions +type GetServingEndpointPermissionsRequest struct { + // The serving endpoint for which to get or manage permissions. 
+ ServingEndpointId string `json:"-" url:"-"` +} + +// Get a single serving endpoint +type GetServingEndpointRequest struct { + // The name of the serving endpoint. This field is required. + Name string `json:"-" url:"-"` +} + +type GoogleCloudVertexAiConfig struct { + // The Databricks secret key reference for a private key for the service + // account which has access to the Google Cloud Vertex AI Service. See [Best + // practices for managing service account keys]. If you prefer to paste your + // API key directly, see `private_key_plaintext`. You must provide an API + // key using one of the following fields: `private_key` or + // `private_key_plaintext` + // + // [Best practices for managing service account keys]: + // https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys + PrivateKey string `json:"private_key,omitempty"` + // The private key for the service account which has access to the Google + // Cloud Vertex AI Service provided as a plaintext secret. See [Best + // practices for managing service account keys]. If you prefer to reference + // your key using Databricks Secrets, see `private_key`. You must provide an + // API key using one of the following fields: `private_key` or + // `private_key_plaintext`. + // + // [Best practices for managing service account keys]: + // https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys + PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"` + // This is the Google Cloud project id that the service account is + // associated with. + ProjectId string `json:"project_id"` + // This is the region for the Google Cloud Vertex AI Service. See [supported + // regions] for more details. Some models are only available in specific + // regions. + // + // [supported regions]: + // https://cloud.google.com/vertex-ai/docs/general/locations + Region string `json:"region"` + + ForceSendFields []string `json:"-"` +} + +func (s *GoogleCloudVertexAiConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GoogleCloudVertexAiConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type HttpRequestResponse struct { + Contents io.ReadCloser `json:"-"` +} + +type ListEndpointsResponse struct { + // The list of endpoints. + Endpoints []ServingEndpoint `json:"endpoints,omitempty"` +} + +// Get the latest logs for a served model +type LogsRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name string `json:"-" url:"-"` + // The name of the served model that logs will be retrieved for. This field + // is required. + ServedModelName string `json:"-" url:"-"` +} + +// A representation of all DataPlaneInfo for operations that can be done on a +// model through Data Plane APIs. +type ModelDataPlaneInfo struct { + // Information required to query DataPlane API 'query' endpoint. + QueryInfo *DataPlaneInfo `json:"query_info,omitempty"` +} + +// Configs needed to create an OpenAI model route. +type OpenAiConfig struct { + // This field is only required for Azure AD OpenAI and is the Microsoft + // Entra Client ID. + MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"` + // The Databricks secret key reference for a client secret used for + // Microsoft Entra ID authentication. If you prefer to paste your client + // secret directly, see `microsoft_entra_client_secret_plaintext`. 
You must
+ // provide an API key using one of the following fields:
+ // `microsoft_entra_client_secret` or
+ // `microsoft_entra_client_secret_plaintext`.
+ MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"`
+ // The client secret used for Microsoft Entra ID authentication provided as
+ // a plaintext string. If you prefer to reference your key using Databricks
+ // Secrets, see `microsoft_entra_client_secret`. You must provide an API key
+ // using one of the following fields: `microsoft_entra_client_secret` or
+ // `microsoft_entra_client_secret_plaintext`.
+ MicrosoftEntraClientSecretPlaintext string `json:"microsoft_entra_client_secret_plaintext,omitempty"`
+ // This field is only required for Azure AD OpenAI and is the Microsoft
+ // Entra Tenant ID.
+ MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"`
+ // This is a field to provide a customized base URL for the OpenAI API. For
+ // Azure OpenAI, this field is required, and is the base URL for the Azure
+ // OpenAI API service provided by Azure. For other OpenAI API types, this
+ // field is optional, and if left unspecified, the standard OpenAI base URL
+ // is used.
+ OpenaiApiBase string `json:"openai_api_base,omitempty"`
+ // The Databricks secret key reference for an OpenAI API key using the
+ // OpenAI or Azure service. If you prefer to paste your API key directly,
+ // see `openai_api_key_plaintext`. You must provide an API key using one of
+ // the following fields: `openai_api_key` or `openai_api_key_plaintext`.
+ OpenaiApiKey string `json:"openai_api_key,omitempty"`
+ // The OpenAI API key using the OpenAI or Azure service provided as a
+ // plaintext string. If you prefer to reference your key using Databricks
+ // Secrets, see `openai_api_key`. You must provide an API key using one of
+ // the following fields: `openai_api_key` or `openai_api_key_plaintext`.
+ OpenaiApiKeyPlaintext string `json:"openai_api_key_plaintext,omitempty"`
+ // This is an optional field to specify the type of OpenAI API to use. For
+ // Azure OpenAI, this field is required; set it to the preferred security
+ // access validation protocol. For access token validation, use azure. For
+ // authentication using Azure Active Directory (Azure AD), use azuread.
+ OpenaiApiType string `json:"openai_api_type,omitempty"`
+ // This is an optional field to specify the OpenAI API version. For Azure
+ // OpenAI, this field is required, and is the version of the Azure OpenAI
+ // service to utilize, specified by a date.
+ OpenaiApiVersion string `json:"openai_api_version,omitempty"`
+ // This field is only required for Azure OpenAI and is the name of the
+ // deployment resource for the Azure OpenAI service.
+ OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"`
+ // This is an optional field to specify the organization in OpenAI or Azure
+ // OpenAI.
+ OpenaiOrganization string `json:"openai_organization,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *OpenAiConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s OpenAiConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PaLmConfig struct {
+ // The Databricks secret key reference for a PaLM API key. If you prefer to
+ // paste your API key directly, see `palm_api_key_plaintext`. You must
+ // provide an API key using one of the following fields: `palm_api_key` or
+ // `palm_api_key_plaintext`.
+ PalmApiKey string `json:"palm_api_key,omitempty"`
+ // The PaLM API key provided as a plaintext string. If you prefer to
+ // reference your key using Databricks Secrets, see `palm_api_key`. You must
+ // provide an API key using one of the following fields: `palm_api_key` or
+ // `palm_api_key_plaintext`.
+ PalmApiKeyPlaintext string `json:"palm_api_key_plaintext,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PaLmConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PaLmConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PatchServingEndpointTags struct {
+ // List of endpoint tags to add
+ AddTags []EndpointTag `json:"add_tags,omitempty"`
+ // List of tag keys to delete
+ DeleteTags []string `json:"delete_tags,omitempty"`
+ // The name of the serving endpoint whose tags to patch. This field is
+ // required.
+ Name string `json:"-" url:"-"`
+}
+
+type PayloadTable struct {
+ Name string `json:"name,omitempty"`
+
+ Status string `json:"status,omitempty"`
+
+ StatusMessage string `json:"status_message,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PayloadTable) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PayloadTable) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PutAiGatewayRequest struct {
+ // Configuration for AI Guardrails to prevent unwanted data and unsafe data
+ // in requests and responses.
+ Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"`
+ // Configuration for payload logging using inference tables. Use these
+ // tables to monitor and audit data being sent to and received from model
+ // APIs and to improve model quality.
+ InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"`
+ // The name of the serving endpoint whose AI Gateway is being updated. This
+ // field is required.
+ Name string `json:"-" url:"-"`
+ // Configuration for rate limits which can be set to limit endpoint traffic.
+ RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"`
+ // Configuration to enable usage tracking using system tables. These tables
+ // allow you to monitor operational usage on endpoints and their associated
+ // costs.
+ UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"`
+}
+
+type PutAiGatewayResponse struct {
+ // Configuration for AI Guardrails to prevent unwanted data and unsafe data
+ // in requests and responses.
+ Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"`
+ // Configuration for payload logging using inference tables. Use these
+ // tables to monitor and audit data being sent to and received from model
+ // APIs and to improve model quality.
+ InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"`
+ // Configuration for rate limits which can be set to limit endpoint traffic.
+ RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"`
+ // Configuration to enable usage tracking using system tables. These tables
+ // allow you to monitor operational usage on endpoints and their associated
+ // costs.
+ UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"`
+}
+
+type PutRequest struct {
+ // The name of the serving endpoint whose rate limits are being updated.
+ // This field is required.
+ Name string `json:"-" url:"-"`
+ // The list of endpoint rate limits.
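+ // For instance (the endpoint name and limit values are assumptions):
+ //
+ //	req := PutRequest{
+ //		Name: "my-endpoint",
+ //		RateLimits: []RateLimit{{
+ //			Calls:         100,
+ //			Key:           RateLimitKeyEndpoint,
+ //			RenewalPeriod: RateLimitRenewalPeriodMinute,
+ //		}},
+ //	}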
+ RateLimits []RateLimit `json:"rate_limits,omitempty"` +} + +type PutResponse struct { + // The list of endpoint rate limits. + RateLimits []RateLimit `json:"rate_limits,omitempty"` +} + +type QueryEndpointInput struct { + // Pandas Dataframe input in the records orientation. + DataframeRecords []any `json:"dataframe_records,omitempty"` + // Pandas Dataframe input in the split orientation. + DataframeSplit *DataframeSplitInput `json:"dataframe_split,omitempty"` + // The extra parameters field used ONLY for __completions, chat,__ and + // __embeddings external & foundation model__ serving endpoints. This is a + // map of strings and should only be used with other external/foundation + // model query fields. + ExtraParams map[string]string `json:"extra_params,omitempty"` + // The input string (or array of strings) field used ONLY for __embeddings + // external & foundation model__ serving endpoints and is the only field + // (along with extra_params if needed) used by embeddings queries. + Input any `json:"input,omitempty"` + // Tensor-based input in columnar format. + Inputs any `json:"inputs,omitempty"` + // Tensor-based input in row format. + Instances []any `json:"instances,omitempty"` + // The max tokens field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is an integer and should only + // be used with other chat/completions query fields. + MaxTokens int `json:"max_tokens,omitempty"` + // The messages field used ONLY for __chat external & foundation model__ + // serving endpoints. This is a map of strings and should only be used with + // other chat query fields. + Messages []ChatMessage `json:"messages,omitempty"` + // The n (number of candidates) field used ONLY for __completions__ and + // __chat external & foundation model__ serving endpoints. This is an + // integer between 1 and 5 with a default of 1 and should only be used with + // other chat/completions query fields. + N int `json:"n,omitempty"` + // The name of the serving endpoint. This field is required. + Name string `json:"-" url:"-"` + // The prompt string (or array of strings) field used ONLY for __completions + // external & foundation model__ serving endpoints and should only be used + // with other completions query fields. + Prompt any `json:"prompt,omitempty"` + // The stop sequences field used ONLY for __completions__ and __chat + // external & foundation model__ serving endpoints. This is a list of + // strings and should only be used with other chat/completions query fields. + Stop []string `json:"stop,omitempty"` + // The stream field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is a boolean defaulting to + // false and should only be used with other chat/completions query fields. + Stream bool `json:"stream,omitempty"` + // The temperature field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is a float between 0.0 and 2.0 + // with a default of 1.0 and should only be used with other chat/completions + // query fields. 
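+ // Putting the chat fields together, a hedged example query (the endpoint
+ // name and parameter values are assumptions):
+ //
+ //	in := QueryEndpointInput{
+ //		Name: "my-chat-endpoint",
+ //		Messages: []ChatMessage{
+ //			{Role: ChatMessageRoleUser, Content: "Hello!"},
+ //		},
+ //		MaxTokens:   64,
+ //		Temperature: 0.7,
+ //	}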
+ Temperature float64 `json:"temperature,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryEndpointInput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryEndpointInput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryEndpointResponse struct { + // The list of choices returned by the __chat or completions + // external/foundation model__ serving endpoint. + Choices []V1ResponseChoiceElement `json:"choices,omitempty"` + // The timestamp in seconds when the query was created in Unix time returned + // by a __completions or chat external/foundation model__ serving endpoint. + Created int64 `json:"created,omitempty"` + // The list of the embeddings returned by the __embeddings + // external/foundation model__ serving endpoint. + Data []EmbeddingsV1ResponseEmbeddingElement `json:"data,omitempty"` + // The ID of the query that may be returned by a __completions or chat + // external/foundation model__ serving endpoint. + Id string `json:"id,omitempty"` + // The name of the __external/foundation model__ used for querying. This is + // the name of the model that was specified in the endpoint config. + Model string `json:"model,omitempty"` + // The type of object returned by the __external/foundation model__ serving + // endpoint, one of [text_completion, chat.completion, list (of + // embeddings)]. + Object QueryEndpointResponseObject `json:"object,omitempty"` + // The predictions returned by the serving endpoint. + Predictions []any `json:"predictions,omitempty"` + // The name of the served model that served the request. This is useful when + // there are multiple models behind the same endpoint with traffic split. + ServedModelName string `json:"-" url:"-" header:"served-model-name,omitempty"` + // The usage object that may be returned by the __external/foundation + // model__ serving endpoint. This contains information about the number of + // tokens used in the prompt and response. + Usage *ExternalModelUsageElement `json:"usage,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryEndpointResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryEndpointResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of object returned by the __external/foundation model__ serving +// endpoint, one of [text_completion, chat.completion, list (of embeddings)]. 
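+//
+// A sketch of dispatching on this field (`resp` is a hypothetical
+// QueryEndpointResponse value):
+//
+//	switch resp.Object {
+//	case QueryEndpointResponseObjectChatCompletion:
+//		// chat results arrive in resp.Choices
+//	case QueryEndpointResponseObjectList:
+//		// embeddings arrive in resp.Data
+//	}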
+type QueryEndpointResponseObject string + +const QueryEndpointResponseObjectChatCompletion QueryEndpointResponseObject = `chat.completion` + +const QueryEndpointResponseObjectList QueryEndpointResponseObject = `list` + +const QueryEndpointResponseObjectTextCompletion QueryEndpointResponseObject = `text_completion` + +// String representation for [fmt.Print] +func (f *QueryEndpointResponseObject) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryEndpointResponseObject) Set(v string) error { + switch v { + case `chat.completion`, `list`, `text_completion`: + *f = QueryEndpointResponseObject(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "chat.completion", "list", "text_completion"`, v) + } +} + +// Type always returns QueryEndpointResponseObject to satisfy [pflag.Value] interface +func (f *QueryEndpointResponseObject) Type() string { + return "QueryEndpointResponseObject" +} + +type RateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls int64 `json:"calls"` + // Key field for a serving endpoint rate limit. Currently, only 'user' and + // 'endpoint' are supported, with 'endpoint' being the default if not + // specified. + Key RateLimitKey `json:"key,omitempty"` + // Renewal period field for a serving endpoint rate limit. Currently, only + // 'minute' is supported. + RenewalPeriod RateLimitRenewalPeriod `json:"renewal_period"` +} + +type RateLimitKey string + +const RateLimitKeyEndpoint RateLimitKey = `endpoint` + +const RateLimitKeyUser RateLimitKey = `user` + +// String representation for [fmt.Print] +func (f *RateLimitKey) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RateLimitKey) Set(v string) error { + switch v { + case `endpoint`, `user`: + *f = RateLimitKey(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "endpoint", "user"`, v) + } +} + +// Type always returns RateLimitKey to satisfy [pflag.Value] interface +func (f *RateLimitKey) Type() string { + return "RateLimitKey" +} + +type RateLimitRenewalPeriod string + +const RateLimitRenewalPeriodMinute RateLimitRenewalPeriod = `minute` + +// String representation for [fmt.Print] +func (f *RateLimitRenewalPeriod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RateLimitRenewalPeriod) Set(v string) error { + switch v { + case `minute`: + *f = RateLimitRenewalPeriod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "minute"`, v) + } +} + +// Type always returns RateLimitRenewalPeriod to satisfy [pflag.Value] interface +func (f *RateLimitRenewalPeriod) Type() string { + return "RateLimitRenewalPeriod" +} + +type Route struct { + // The name of the served model this route configures traffic for. + ServedModelName string `json:"served_model_name"` + // The percentage of endpoint traffic to send to this route. It must be an + // integer between 0 and 100 inclusive. + TrafficPercentage int `json:"traffic_percentage"` +} + +type ServedEntityInput struct { + // The name of the entity to be served. The entity may be a model in the + // Databricks Model Registry, a model in the Unity Catalog (UC), or a + // function of type FEATURE_SPEC in the UC. If it is a UC object, the full + // name of the object should be given in the form of + // **catalog_name.schema_name.model_name**. 
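+ // For example, serving version 1 of a UC model (all names below are
+ // placeholders):
+ //
+ //	entity := ServedEntityInput{
+ //		EntityName:         "main.default.my_model",
+ //		EntityVersion:      "1",
+ //		WorkloadSize:       "Small",
+ //		ScaleToZeroEnabled: true,
+ //	}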
+ EntityName string `json:"entity_name,omitempty"` + + EntityVersion string `json:"entity_version,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + // The external model to be served. NOTE: Only one of external_model and + // (entity_name, entity_version, workload_size, workload_type, and + // scale_to_zero_enabled) can be specified with the latter set being used + // for custom model serving for a Databricks registered model. For an + // existing endpoint with external_model, it cannot be updated to an + // endpoint without external_model. If the endpoint is created without + // external_model, users cannot update it to add external_model later. The + // task type of all external models within an endpoint must be the same. + ExternalModel *ExternalModel `json:"external_model,omitempty"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. + Name string `json:"name,omitempty"` + // Whether the compute resources for the served entity should scale down to + // zero. + ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size is 0. + WorkloadSize string `json:"workload_size,omitempty"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. 
+ // + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedEntityInput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedEntityInput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedEntityOutput struct { + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + + Creator string `json:"creator,omitempty"` + // The name of the entity to be served. The entity may be a model in the + // Databricks Model Registry, a model in the Unity Catalog (UC), or a + // function of type FEATURE_SPEC in the UC. If it is a UC object, the full + // name of the object should be given in the form of + // **catalog_name.schema_name.model_name**. + EntityName string `json:"entity_name,omitempty"` + + EntityVersion string `json:"entity_version,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + // The external model to be served. NOTE: Only one of external_model and + // (entity_name, entity_version, workload_size, workload_type, and + // scale_to_zero_enabled) can be specified with the latter set being used + // for custom model serving for a Databricks registered model. For an + // existing endpoint with external_model, it cannot be updated to an + // endpoint without external_model. If the endpoint is created without + // external_model, users cannot update it to add external_model later. The + // task type of all external models within an endpoint must be the same. + ExternalModel *ExternalModel `json:"external_model,omitempty"` + // All fields are not sensitive as they are hard-coded in the system and + // made available to customers. + FoundationModel *FoundationModel `json:"foundation_model,omitempty"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. + Name string `json:"name,omitempty"` + // Whether the compute resources for the served entity should scale down to + // zero. + ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` + + State *ServedModelState `json:"state,omitempty"` + // The workload size of the served entity. 
The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size is 0. + WorkloadSize string `json:"workload_size,omitempty"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. + // + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedEntityOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedEntityOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedEntitySpec struct { + EntityName string `json:"entity_name,omitempty"` + + EntityVersion string `json:"entity_version,omitempty"` + + ExternalModel *ExternalModel `json:"external_model,omitempty"` + // All fields are not sensitive as they are hard-coded in the system and + // made available to customers. + FoundationModel *FoundationModel `json:"foundation_model,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedEntitySpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedEntitySpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedModelInput struct { + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` + + ModelName string `json:"model_name"` + + ModelVersion string `json:"model_version"` + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. + Name string `json:"name,omitempty"` + // Whether the compute resources for the served entity should scale down to + // zero. 
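+ // A sketch of the deprecated served-model form (the model name and version
+ // are placeholders):
+ //
+ //	m := ServedModelInput{
+ //		ModelName:          "my_model",
+ //		ModelVersion:       "2",
+ //		ScaleToZeroEnabled: true,
+ //		WorkloadSize:       ServedModelInputWorkloadSizeSmall,
+ //	}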
+ ScaleToZeroEnabled bool `json:"scale_to_zero_enabled"` + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size is 0. + WorkloadSize ServedModelInputWorkloadSize `json:"workload_size,omitempty"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. + // + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServedModelInputWorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedModelInput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedModelInput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedModelInputWorkloadSize string + +const ServedModelInputWorkloadSizeLarge ServedModelInputWorkloadSize = `Large` + +const ServedModelInputWorkloadSizeMedium ServedModelInputWorkloadSize = `Medium` + +const ServedModelInputWorkloadSizeSmall ServedModelInputWorkloadSize = `Small` + +// String representation for [fmt.Print] +func (f *ServedModelInputWorkloadSize) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelInputWorkloadSize) Set(v string) error { + switch v { + case `Large`, `Medium`, `Small`: + *f = ServedModelInputWorkloadSize(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Large", "Medium", "Small"`, v) + } +} + +// Type always returns ServedModelInputWorkloadSize to satisfy [pflag.Value] interface +func (f *ServedModelInputWorkloadSize) Type() string { + return "ServedModelInputWorkloadSize" +} + +type ServedModelInputWorkloadType string + +const ServedModelInputWorkloadTypeCpu ServedModelInputWorkloadType = `CPU` + +const ServedModelInputWorkloadTypeGpuLarge ServedModelInputWorkloadType = `GPU_LARGE` + +const ServedModelInputWorkloadTypeGpuMedium ServedModelInputWorkloadType = `GPU_MEDIUM` + +const ServedModelInputWorkloadTypeGpuSmall ServedModelInputWorkloadType = `GPU_SMALL` + +const ServedModelInputWorkloadTypeMultigpuMedium ServedModelInputWorkloadType = `MULTIGPU_MEDIUM` + +// String representation for [fmt.Print] +func (f *ServedModelInputWorkloadType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelInputWorkloadType) Set(v string) error { + switch v { + case `CPU`, `GPU_LARGE`, `GPU_MEDIUM`, `GPU_SMALL`, `MULTIGPU_MEDIUM`: + *f = ServedModelInputWorkloadType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CPU", "GPU_LARGE", "GPU_MEDIUM", "GPU_SMALL", "MULTIGPU_MEDIUM"`, v) + } +} + +// Type always returns ServedModelInputWorkloadType to satisfy [pflag.Value] interface +func (f *ServedModelInputWorkloadType) Type() string { + return 
"ServedModelInputWorkloadType" +} + +type ServedModelOutput struct { + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + + Creator string `json:"creator,omitempty"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + + ModelName string `json:"model_name,omitempty"` + + ModelVersion string `json:"model_version,omitempty"` + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. + Name string `json:"name,omitempty"` + // Whether the compute resources for the served entity should scale down to + // zero. + ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` + + State *ServedModelState `json:"state,omitempty"` + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size is 0. + WorkloadSize string `json:"workload_size,omitempty"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. 
+ // + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedModelOutput) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedModelOutput) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedModelSpec struct { + // Only one of model_name and entity_name should be populated + ModelName string `json:"model_name,omitempty"` + // Only one of model_version and entity_version should be populated + ModelVersion string `json:"model_version,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedModelSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedModelSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedModelState struct { + Deployment ServedModelStateDeployment `json:"deployment,omitempty"` + + DeploymentStateMessage string `json:"deployment_state_message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServedModelState) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServedModelState) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServedModelStateDeployment string + +const ServedModelStateDeploymentAborted ServedModelStateDeployment = `DEPLOYMENT_ABORTED` + +const ServedModelStateDeploymentCreating ServedModelStateDeployment = `DEPLOYMENT_CREATING` + +const ServedModelStateDeploymentFailed ServedModelStateDeployment = `DEPLOYMENT_FAILED` + +const ServedModelStateDeploymentReady ServedModelStateDeployment = `DEPLOYMENT_READY` + +const ServedModelStateDeploymentRecovering ServedModelStateDeployment = `DEPLOYMENT_RECOVERING` + +// String representation for [fmt.Print] +func (f *ServedModelStateDeployment) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelStateDeployment) Set(v string) error { + switch v { + case `DEPLOYMENT_ABORTED`, `DEPLOYMENT_CREATING`, `DEPLOYMENT_FAILED`, `DEPLOYMENT_READY`, `DEPLOYMENT_RECOVERING`: + *f = ServedModelStateDeployment(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEPLOYMENT_ABORTED", "DEPLOYMENT_CREATING", "DEPLOYMENT_FAILED", "DEPLOYMENT_READY", "DEPLOYMENT_RECOVERING"`, v) + } +} + +// Type always returns ServedModelStateDeployment to satisfy [pflag.Value] interface +func (f *ServedModelStateDeployment) Type() string { + return "ServedModelStateDeployment" +} + +type ServerLogsResponse struct { + // The most recent log lines of the model server processing invocation + // requests. + Logs string `json:"logs"` +} + +type ServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model and provisioned throughput endpoints are currently + // supported. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The config that is currently being served by the endpoint. + Config *EndpointCoreConfigSummary `json:"config,omitempty"` + // The timestamp when the endpoint was created in Unix time. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // The email of the user who created the serving endpoint. 
+ Creator string `json:"creator,omitempty"` + // System-generated ID of the endpoint, included to be used by the + // Permissions API. + Id string `json:"id,omitempty"` + // The timestamp when the endpoint was last updated by a user in Unix time. + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // The name of the serving endpoint. + Name string `json:"name,omitempty"` + // Information corresponding to the state of the serving endpoint. + State *EndpointState `json:"state,omitempty"` + // Tags attached to the serving endpoint. + Tags []EndpointTag `json:"tags,omitempty"` + // The task type of the serving endpoint. + Task string `json:"task,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpoint) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpoint) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointAccessControlResponse struct { + // All permissions. + AllPermissions []ServingEndpointPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointDetailed struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model and provisioned throughput endpoints are currently + // supported. + AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The config that is currently being served by the endpoint. + Config *EndpointCoreConfigOutput `json:"config,omitempty"` + // The timestamp when the endpoint was created in Unix time. + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // The email of the user who created the serving endpoint. + Creator string `json:"creator,omitempty"` + // Information required to query DataPlane APIs. + DataPlaneInfo *ModelDataPlaneInfo `json:"data_plane_info,omitempty"` + // Endpoint invocation url if route optimization is enabled for endpoint + EndpointUrl string `json:"endpoint_url,omitempty"` + // System-generated ID of the endpoint. This is used to refer to the + // endpoint in the Permissions API + Id string `json:"id,omitempty"` + // The timestamp when the endpoint was last updated by a user in Unix time. 
+ LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // The name of the serving endpoint. + Name string `json:"name,omitempty"` + // The config that the endpoint is attempting to update to. + PendingConfig *EndpointPendingConfig `json:"pending_config,omitempty"` + // The permission level of the principal making the request. + PermissionLevel ServingEndpointDetailedPermissionLevel `json:"permission_level,omitempty"` + // Boolean representing if route optimization has been enabled for the + // endpoint + RouteOptimized bool `json:"route_optimized,omitempty"` + // Information corresponding to the state of the serving endpoint. + State *EndpointState `json:"state,omitempty"` + // Tags attached to the serving endpoint. + Tags []EndpointTag `json:"tags,omitempty"` + // The task type of the serving endpoint. + Task string `json:"task,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointDetailed) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointDetailed) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointDetailedPermissionLevel string + +const ServingEndpointDetailedPermissionLevelCanManage ServingEndpointDetailedPermissionLevel = `CAN_MANAGE` + +const ServingEndpointDetailedPermissionLevelCanQuery ServingEndpointDetailedPermissionLevel = `CAN_QUERY` + +const ServingEndpointDetailedPermissionLevelCanView ServingEndpointDetailedPermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *ServingEndpointDetailedPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServingEndpointDetailedPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`: + *f = ServingEndpointDetailedPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"`, v) + } +} + +// Type always returns ServingEndpointDetailedPermissionLevel to satisfy [pflag.Value] interface +func (f *ServingEndpointDetailedPermissionLevel) Type() string { + return "ServingEndpointDetailedPermissionLevel" +} + +type ServingEndpointPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type ServingEndpointPermissionLevel string + +const ServingEndpointPermissionLevelCanManage ServingEndpointPermissionLevel = `CAN_MANAGE` + +const ServingEndpointPermissionLevelCanQuery ServingEndpointPermissionLevel = `CAN_QUERY` + +const ServingEndpointPermissionLevelCanView ServingEndpointPermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *ServingEndpointPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServingEndpointPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`: + *f = ServingEndpointPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"`, 
v) + } +} + +// Type always returns ServingEndpointPermissionLevel to satisfy [pflag.Value] interface +func (f *ServingEndpointPermissionLevel) Type() string { + return "ServingEndpointPermissionLevel" +} + +type ServingEndpointPermissions struct { + AccessControlList []ServingEndpointAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServingEndpointPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServingEndpointPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServingEndpointPermissionsRequest struct { + AccessControlList []ServingEndpointAccessControlRequest `json:"access_control_list,omitempty"` + // The serving endpoint for which to get or manage permissions. + ServingEndpointId string `json:"-" url:"-"` +} + +type ServingModelWorkloadType string + +const ServingModelWorkloadTypeCpu ServingModelWorkloadType = `CPU` + +const ServingModelWorkloadTypeGpuLarge ServingModelWorkloadType = `GPU_LARGE` + +const ServingModelWorkloadTypeGpuMedium ServingModelWorkloadType = `GPU_MEDIUM` + +const ServingModelWorkloadTypeGpuSmall ServingModelWorkloadType = `GPU_SMALL` + +const ServingModelWorkloadTypeMultigpuMedium ServingModelWorkloadType = `MULTIGPU_MEDIUM` + +// String representation for [fmt.Print] +func (f *ServingModelWorkloadType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServingModelWorkloadType) Set(v string) error { + switch v { + case `CPU`, `GPU_LARGE`, `GPU_MEDIUM`, `GPU_SMALL`, `MULTIGPU_MEDIUM`: + *f = ServingModelWorkloadType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CPU", "GPU_LARGE", "GPU_MEDIUM", "GPU_SMALL", "MULTIGPU_MEDIUM"`, v) + } +} + +// Type always returns ServingModelWorkloadType to satisfy [pflag.Value] interface +func (f *ServingModelWorkloadType) Type() string { + return "ServingModelWorkloadType" +} + +type TrafficConfig struct { + // The list of routes that define traffic to each served entity. + Routes []Route `json:"routes,omitempty"` +} + +type V1ResponseChoiceElement struct { + // The finish reason returned by the endpoint. + FinishReason string `json:"finishReason,omitempty"` + // The index of the choice in the __chat or completions__ response. + Index int `json:"index,omitempty"` + // The logprobs returned only by the __completions__ endpoint. + Logprobs int `json:"logprobs,omitempty"` + // The message response from the __chat__ endpoint. + Message *ChatMessage `json:"message,omitempty"` + // The text response from the __completions__ endpoint. 
+ Text string `json:"text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *V1ResponseChoiceElement) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s V1ResponseChoiceElement) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/settings/v2preview/api.go b/settings/v2preview/api.go new file mode 100755 index 000000000..7959cf4e6 --- /dev/null +++ b/settings/v2preview/api.go @@ -0,0 +1,1635 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Account Ip Access Lists Preview, Account Settings, Account Settings Preview, Aibi Dashboard Embedding Access Policy Preview, Aibi Dashboard Embedding Approved Domains Preview, Automatic Cluster Update Preview, Compliance Security Profile Preview, Credentials Manager Preview, Csp Enablement Account Preview, Default Namespace Preview, Disable Legacy Access Preview, Disable Legacy Dbfs Preview, Disable Legacy Features Preview, Enable Ip Access Lists Preview, Enhanced Security Monitoring Preview, Esm Enablement Account Preview, Ip Access Lists Preview, Network Connectivity Preview, Notification Destinations Preview, Personal Compute Preview, Restrict Workspace Admins Preview, Settings, Settings Preview, Token Management Preview, Tokens Preview, Workspace Conf Preview, etc. +package settingspreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type AccountIpAccessListsPreviewInterface interface { + + // Create access list. + // + // Creates an IP access list for the account. + // + // A list can be an allow list or a block list. See the top of this file for a + // description of how the server treats allow lists and block lists at runtime. + // + // When creating or updating an IP access list: + // + // * For all allow lists and block lists combined, the API supports a maximum of + // 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + // exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. + // * If the new list would block the calling user's current IP, error 400 is + // returned with `error_code` value `INVALID_STATE`. + // + // It can take a few minutes for the changes to take effect. + Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) + + // Delete access list. + // + // Deletes an IP access list, specified by its list ID. + Delete(ctx context.Context, request DeleteAccountIpAccessListRequest) error + + // Delete access list. + // + // Deletes an IP access list, specified by its list ID. + DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error + + // Get IP access list. + // + // Gets an IP access list, specified by its list ID. + Get(ctx context.Context, request GetAccountIpAccessListRequest) (*GetIpAccessListResponse, error) + + // Get IP access list. + // + // Gets an IP access list, specified by its list ID. + GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*GetIpAccessListResponse, error) + + // Get access lists. + // + // Gets all IP access lists for the specified account. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[IpAccessListInfo] + + // Get access lists. + // + // Gets all IP access lists for the specified account. 
+ // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]IpAccessListInfo, error) + + // IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. + // + // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. + // + // Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) + + // GetByLabel calls [AccountIpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. + // + // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. + // + // Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) + + // Replace access list. + // + // Replaces an IP access list, specified by its ID. + // + // A list can include allow lists and block lists. See the top of this file for + // a description of how the server treats allow lists and block lists at run + // time. When replacing an IP access list: * For all allow lists and block lists + // combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR + // counts as a single value. Attempts to exceed that number return error 400 + // with `error_code` value `QUOTA_EXCEEDED`. * If the resulting list would block + // the calling user's current IP, error 400 is returned with `error_code` value + // `INVALID_STATE`. It can take a few minutes for the changes to take effect. + Replace(ctx context.Context, request ReplaceIpAccessList) error + + // Update access list. + // + // Updates an existing IP access list, specified by its ID. + // + // A list can include allow lists and block lists. See the top of this file for + // a description of how the server treats allow lists and block lists at run + // time. + // + // When updating an IP access list: + // + // * For all allow lists and block lists combined, the API supports a maximum of + // 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + // exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. + // * If the updated list would block the calling user's current IP, error 400 is + // returned with `error_code` value `INVALID_STATE`. + // + // It can take a few minutes for the changes to take effect. + Update(ctx context.Context, request UpdateIpAccessList) error +} + +func NewAccountIpAccessListsPreview(client *client.DatabricksClient) *AccountIpAccessListsPreviewAPI { + return &AccountIpAccessListsPreviewAPI{ + accountIpAccessListsPreviewImpl: accountIpAccessListsPreviewImpl{ + client: client, + }, + } +} + +// The Accounts IP Access List API enables account admins to configure IP access +// lists for access to the account console. +// +// Account IP Access Lists affect web application access and REST API access to +// the account console and account APIs. If the feature is disabled for the +// account, all access is allowed for this account. There is support for allow +// lists (inclusion) and block lists (exclusion). +// +// When a connection is attempted: 1. 
**First, all block lists are checked.** If +// the connection IP address matches any block list, the connection is rejected. +// 2. **If the connection was not rejected by block lists**, the IP address is +// compared with the allow lists. +// +// If there is at least one allow list for the account, the connection is +// allowed only if the IP address matches an allow list. If there are no allow +// lists for the account, all IP addresses are allowed. +// +// For all allow lists and block lists combined, the account supports a maximum +// of 1000 IP/CIDR values, where one CIDR counts as a single value. +// +// After changes to the account-level IP access lists, it can take a few minutes +// for changes to take effect. +type AccountIpAccessListsPreviewAPI struct { + accountIpAccessListsPreviewImpl +} + +// Delete access list. +// +// Deletes an IP access list, specified by its list ID. +func (a *AccountIpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { + return a.accountIpAccessListsPreviewImpl.Delete(ctx, DeleteAccountIpAccessListRequest{ + IpAccessListId: ipAccessListId, + }) +} + +// Get IP access list. +// +// Gets an IP access list, specified by its list ID. +func (a *AccountIpAccessListsPreviewAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*GetIpAccessListResponse, error) { + return a.accountIpAccessListsPreviewImpl.Get(ctx, GetAccountIpAccessListRequest{ + IpAccessListId: ipAccessListId, + }) +} + +// IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. +// +// Returns an error if there's more than one [IpAccessListInfo] with the same .Label. +// +// Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AccountIpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Label + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Label: %s", key) + } + mapping[key] = v.ListId + } + return mapping, nil +} + +// GetByLabel calls [AccountIpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. +// +// Returns an error if there's more than one [IpAccessListInfo] with the same .Label. +// +// Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
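+//
+// A hedged usage sketch of a lookup by label (the label value
+// "corp-office-allowlist" is hypothetical):
+//
+//	info, err := a.GetByLabel(ctx, "corp-office-allowlist")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.ListId)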
+func (a *AccountIpAccessListsPreviewAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]IpAccessListInfo{} + for _, v := range result { + key := v.Label + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("IpAccessListInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of IpAccessListInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type AccountSettingsInterface interface { +} + +func NewAccountSettings(client *client.DatabricksClient) *AccountSettingsAPI { + return &AccountSettingsAPI{ + accountSettingsImpl: accountSettingsImpl{ + client: client, + }, + } +} + +// Accounts Settings API allows users to manage settings at the account level. +type AccountSettingsAPI struct { + accountSettingsImpl +} + +type AccountSettingsPreviewInterface interface { +} + +func NewAccountSettingsPreview(client *client.DatabricksClient) *AccountSettingsPreviewAPI { + return &AccountSettingsPreviewAPI{ + accountSettingsPreviewImpl: accountSettingsPreviewImpl{ + client: client, + }, + } +} + +// Accounts Settings API allows users to manage settings at the account level. +type AccountSettingsPreviewAPI struct { + accountSettingsPreviewImpl +} + +type AibiDashboardEmbeddingAccessPolicyPreviewInterface interface { + + // Delete the AI/BI dashboard embedding access policy. + // + // Delete the AI/BI dashboard embedding access policy, reverting back to the + // default. + Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) (*DeleteAibiDashboardEmbeddingAccessPolicySettingResponse, error) + + // Retrieve the AI/BI dashboard embedding access policy. + // + // Retrieves the AI/BI dashboard embedding access policy. The default setting is + // ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on + // approved domains. + Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) + + // Update the AI/BI dashboard embedding access policy. + // + // Updates the AI/BI dashboard embedding access policy at the workspace level. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) +} + +func NewAibiDashboardEmbeddingAccessPolicyPreview(client *client.DatabricksClient) *AibiDashboardEmbeddingAccessPolicyPreviewAPI { + return &AibiDashboardEmbeddingAccessPolicyPreviewAPI{ + aibiDashboardEmbeddingAccessPolicyPreviewImpl: aibiDashboardEmbeddingAccessPolicyPreviewImpl{ + client: client, + }, + } +} + +// Controls whether AI/BI published dashboard embedding is enabled, +// conditionally enabled, or disabled at the workspace level. By default, this +// setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). +type AibiDashboardEmbeddingAccessPolicyPreviewAPI struct { + aibiDashboardEmbeddingAccessPolicyPreviewImpl +} + +type AibiDashboardEmbeddingApprovedDomainsPreviewInterface interface { + + // Delete AI/BI dashboard embedding approved domains. + // + // Delete the list of domains approved to host embedded AI/BI dashboards, + // reverting back to the default empty list. 
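+	//
+	// A hedged sketch of a call ("api" is an assumed instance of this
+	// interface; the zero-valued request is illustrative only, since the
+	// request's fields are not expanded here):
+	//
+	//	_, err := api.Delete(ctx, DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest{})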
+ Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, error) + + // Retrieve the list of domains approved to host embedded AI/BI dashboards. + // + // Retrieves the list of domains approved to host embedded AI/BI dashboards. + Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) + + // Update the list of domains approved to host embedded AI/BI dashboards. + // + // Updates the list of domains approved to host embedded AI/BI dashboards. This + // update will fail if the current workspace access policy is not + // ALLOW_APPROVED_DOMAINS. + Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) +} + +func NewAibiDashboardEmbeddingApprovedDomainsPreview(client *client.DatabricksClient) *AibiDashboardEmbeddingApprovedDomainsPreviewAPI { + return &AibiDashboardEmbeddingApprovedDomainsPreviewAPI{ + aibiDashboardEmbeddingApprovedDomainsPreviewImpl: aibiDashboardEmbeddingApprovedDomainsPreviewImpl{ + client: client, + }, + } +} + +// Controls the list of domains approved to host the embedded AI/BI dashboards. +// The approved domains list can't be mutated when the current access policy is +// not set to ALLOW_APPROVED_DOMAINS. +type AibiDashboardEmbeddingApprovedDomainsPreviewAPI struct { + aibiDashboardEmbeddingApprovedDomainsPreviewImpl +} + +type AutomaticClusterUpdatePreviewInterface interface { + + // Get the automatic cluster update setting. + // + // Gets the automatic cluster update setting. + Get(ctx context.Context, request GetAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) + + // Update the automatic cluster update setting. + // + // Updates the automatic cluster update setting for the workspace. A fresh etag + // needs to be provided in `PATCH` requests (as part of the setting field). The + // etag can be retrieved by making a `GET` request before the `PATCH` request. + // If the setting is updated concurrently, `PATCH` fails with 409 and the + // request must be retried by using the fresh etag in the 409 response. + Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) +} + +func NewAutomaticClusterUpdatePreview(client *client.DatabricksClient) *AutomaticClusterUpdatePreviewAPI { + return &AutomaticClusterUpdatePreviewAPI{ + automaticClusterUpdatePreviewImpl: automaticClusterUpdatePreviewImpl{ + client: client, + }, + } +} + +// Controls whether automatic cluster update is enabled for the current +// workspace. By default, it is turned off. +type AutomaticClusterUpdatePreviewAPI struct { + automaticClusterUpdatePreviewImpl +} + +type ComplianceSecurityProfilePreviewInterface interface { + + // Get the compliance security profile setting. + // + // Gets the compliance security profile setting. + Get(ctx context.Context, request GetComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) + + // Update the compliance security profile setting. + // + // Updates the compliance security profile setting for the workspace. A fresh + // etag needs to be provided in `PATCH` requests (as part of the setting field). + // The etag can be retrieved by making a `GET` request before the `PATCH` + // request. 
If the setting is updated concurrently, `PATCH` fails with 409 and + // the request must be retried by using the fresh etag in the 409 response. + Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) +} + +func NewComplianceSecurityProfilePreview(client *client.DatabricksClient) *ComplianceSecurityProfilePreviewAPI { + return &ComplianceSecurityProfilePreviewAPI{ + complianceSecurityProfilePreviewImpl: complianceSecurityProfilePreviewImpl{ + client: client, + }, + } +} + +// Controls whether to enable the compliance security profile for the current +// workspace. Enabling it on a workspace is permanent. By default, it is turned +// off. +// +// This setting can NOT be disabled once it is enabled. +type ComplianceSecurityProfilePreviewAPI struct { + complianceSecurityProfilePreviewImpl +} + +type CredentialsManagerPreviewInterface interface { + + // Exchange token. + // + // Exchange tokens with an Identity Provider to get a new access token. It + // allows specifying scopes to determine token permissions. + ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) +} + +func NewCredentialsManagerPreview(client *client.DatabricksClient) *CredentialsManagerPreviewAPI { + return &CredentialsManagerPreviewAPI{ + credentialsManagerPreviewImpl: credentialsManagerPreviewImpl{ + client: client, + }, + } +} + +// Credentials manager interacts with Identity Providers to perform +// token exchanges using stored credentials and refresh tokens. +type CredentialsManagerPreviewAPI struct { + credentialsManagerPreviewImpl +} + +type CspEnablementAccountPreviewInterface interface { + + // Get the compliance security profile setting for new workspaces. + // + // Gets the compliance security profile setting for new workspaces. + Get(ctx context.Context, request GetCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) + + // Update the compliance security profile setting for new workspaces. + // + // Updates the value of the compliance security profile setting for new + // workspaces. + Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) +} + +func NewCspEnablementAccountPreview(client *client.DatabricksClient) *CspEnablementAccountPreviewAPI { + return &CspEnablementAccountPreviewAPI{ + cspEnablementAccountPreviewImpl: cspEnablementAccountPreviewImpl{ + client: client, + }, + } +} + +// The compliance security profile settings at the account level control whether +// to enable it for new workspaces. By default, this account-level setting is +// disabled for new workspaces. After workspace creation, account admins can +// enable the compliance security profile individually for each workspace. +// +// This setting can be disabled so that new workspaces do not have compliance +// security profile enabled by default. +type CspEnablementAccountPreviewAPI struct { + cspEnablementAccountPreviewImpl +} + +type DefaultNamespacePreviewInterface interface { + + // Delete the default namespace setting. + // + // Deletes the default namespace setting for the workspace. A fresh etag needs + // to be provided in `DELETE` requests (as a query parameter). The etag can be + // retrieved by making a `GET` request before the `DELETE` request. If the + // setting is updated/deleted concurrently, `DELETE` fails with 409 and the + // request must be retried by using the fresh etag in the 409 response.
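+	//
+	// A sketch of the get-then-delete etag flow described above ("api" is
+	// an assumed instance of this interface, and the Etag field names are
+	// assumptions, since the request and response shapes are not expanded
+	// here):
+	//
+	//	setting, err := api.Get(ctx, GetDefaultNamespaceSettingRequest{})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	_, err = api.Delete(ctx, DeleteDefaultNamespaceSettingRequest{Etag: setting.Etag})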
+ Delete(ctx context.Context, request DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error) + + // Get the default namespace setting. + // + // Gets the default namespace setting. + Get(ctx context.Context, request GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) + + // Update the default namespace setting. + // + // Updates the default namespace setting for the workspace. A fresh etag needs + // to be provided in `PATCH` requests (as part of the setting field). The etag + // can be retrieved by making a `GET` request before the `PATCH` request. Note + // that if the setting does not exist, `GET` returns a NOT_FOUND error and the + // etag is present in the error response, which should be set in the `PATCH` + // request. If the setting is updated concurrently, `PATCH` fails with 409 and + // the request must be retried by using the fresh etag in the 409 response. + Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) +} + +func NewDefaultNamespacePreview(client *client.DatabricksClient) *DefaultNamespacePreviewAPI { + return &DefaultNamespacePreviewAPI{ + defaultNamespacePreviewImpl: defaultNamespacePreviewImpl{ + client: client, + }, + } +} + +// The default namespace setting API allows users to configure the default +// namespace for a Databricks workspace. +// +// Through this API, users can retrieve, set, or modify the default namespace +// used when queries do not reference a fully qualified three-level name. For +// example, if you use the API to set 'retail_prod' as the default catalog, then +// a query 'SELECT * FROM myTable' would reference the object +// 'retail_prod.default.myTable' (the schema 'default' is always assumed). +// +// This setting requires a restart of clusters and SQL warehouses to take +// effect. Additionally, the default namespace only applies when using Unity +// Catalog-enabled compute. +type DefaultNamespacePreviewAPI struct { + defaultNamespacePreviewImpl +} + +type DisableLegacyAccessPreviewInterface interface { + + // Delete Legacy Access Disablement Status. + // + // Deletes legacy access disablement status. + Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) + + // Retrieve Legacy Access Disablement Status. + // + // Retrieves legacy access disablement Status. + Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) + + // Update Legacy Access Disablement Status. + // + // Updates legacy access disablement status. + Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) +} + +func NewDisableLegacyAccessPreview(client *client.DatabricksClient) *DisableLegacyAccessPreviewAPI { + return &DisableLegacyAccessPreviewAPI{ + disableLegacyAccessPreviewImpl: disableLegacyAccessPreviewImpl{ + client: client, + }, + } +} + +// 'Disabling legacy access' has the following impacts: +// +// 1. Disables direct access to the Hive Metastore. However, you can still +// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs +// link) on any External Location access from the workspace. 3. Alters DBFS path +// access to use External Location permissions in place of legacy credentials. +// 4. Enforces Unity Catalog access on all path based access. 
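+//
+// A minimal usage sketch (illustrative only; "w" is an assumed,
+// already-configured *client.DatabricksClient):
+//
+//	api := NewDisableLegacyAccessPreview(w)
+//	status, err := api.Get(ctx, GetDisableLegacyAccessRequest{})
+//	if err != nil {
+//		return err
+//	}
+//	_ = status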
+type DisableLegacyAccessPreviewAPI struct { + disableLegacyAccessPreviewImpl +} + +type DisableLegacyDbfsPreviewInterface interface { + + // Delete the disable legacy DBFS setting. + // + // Deletes the disable legacy DBFS setting for a workspace, reverting back to + // the default. + Delete(ctx context.Context, request DeleteDisableLegacyDbfsRequest) (*DeleteDisableLegacyDbfsResponse, error) + + // Get the disable legacy DBFS setting. + // + // Gets the disable legacy DBFS setting. + Get(ctx context.Context, request GetDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) + + // Update the disable legacy DBFS setting. + // + // Updates the disable legacy DBFS setting for the workspace. + Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) +} + +func NewDisableLegacyDbfsPreview(client *client.DatabricksClient) *DisableLegacyDbfsPreviewAPI { + return &DisableLegacyDbfsPreviewAPI{ + disableLegacyDbfsPreviewImpl: disableLegacyDbfsPreviewImpl{ + client: client, + }, + } +} + +// When this setting is on, access to DBFS root and DBFS mounts is disallowed +// (as well as creation of new mounts). When the setting is off, all DBFS +// functionality is enabled. +type DisableLegacyDbfsPreviewAPI struct { + disableLegacyDbfsPreviewImpl +} + +type DisableLegacyFeaturesPreviewInterface interface { + + // Delete the disable legacy features setting. + // + // Deletes the disable legacy features setting. + Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) + + // Get the disable legacy features setting. + // + // Gets the value of the disable legacy features setting. + Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) + + // Update the disable legacy features setting. + // + // Updates the value of the disable legacy features setting. + Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) +} + +func NewDisableLegacyFeaturesPreview(client *client.DatabricksClient) *DisableLegacyFeaturesPreviewAPI { + return &DisableLegacyFeaturesPreviewAPI{ + disableLegacyFeaturesPreviewImpl: disableLegacyFeaturesPreviewImpl{ + client: client, + }, + } +} + +// Disable legacy features for new Databricks workspaces. +// +// For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. +// Hive Metastore will not be provisioned. 3. Disables the use of +// ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to +// 13.3LTS. +type DisableLegacyFeaturesPreviewAPI struct { + disableLegacyFeaturesPreviewImpl +} + +type EnableIpAccessListsPreviewInterface interface { + + // Delete the account IP access toggle setting. + // + // Reverts the value of the account IP access toggle setting to default (ON). + Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) + + // Get the account IP access toggle setting. + // + // Gets the value of the account IP access toggle setting. + Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) + + // Update the account IP access toggle setting. + // + // Updates the value of the account IP access toggle setting.
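+	//
+	// A hedged sketch of reading the current value before an update ("api"
+	// is an assumed instance of this interface; the update payload is
+	// omitted because its shape is not expanded here):
+	//
+	//	current, err := api.Get(ctx, GetAccountIpAccessEnableRequest{})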
+ Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) +} + +func NewEnableIpAccessListsPreview(client *client.DatabricksClient) *EnableIpAccessListsPreviewAPI { + return &EnableIpAccessListsPreviewAPI{ + enableIpAccessListsPreviewImpl: enableIpAccessListsPreviewImpl{ + client: client, + }, + } +} + +// Controls the enforcement of IP access lists for accessing the account +// console, allowing you to enable or disable restricted access based on IP +// addresses. +type EnableIpAccessListsPreviewAPI struct { + enableIpAccessListsPreviewImpl +} + +type EnhancedSecurityMonitoringPreviewInterface interface { + + // Get the enhanced security monitoring setting. + // + // Gets the enhanced security monitoring setting. + Get(ctx context.Context, request GetEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) + + // Update the enhanced security monitoring setting. + // + // Updates the enhanced security monitoring setting for the workspace. A fresh + // etag needs to be provided in `PATCH` requests (as part of the setting field). + // The etag can be retrieved by making a `GET` request before the `PATCH` + // request. If the setting is updated concurrently, `PATCH` fails with 409 and + // the request must be retried by using the fresh etag in the 409 response. + Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) +} + +func NewEnhancedSecurityMonitoringPreview(client *client.DatabricksClient) *EnhancedSecurityMonitoringPreviewAPI { + return &EnhancedSecurityMonitoringPreviewAPI{ + enhancedSecurityMonitoringPreviewImpl: enhancedSecurityMonitoringPreviewImpl{ + client: client, + }, + } +} + +// Controls whether enhanced security monitoring is enabled for the current +// workspace. By default, it is disabled. However, if the compliance security +// profile is enabled, this is automatically enabled. +// +// If the compliance security profile is disabled, you can enable or disable +// this setting and it is not permanent. +type EnhancedSecurityMonitoringPreviewAPI struct { + enhancedSecurityMonitoringPreviewImpl +} + +type EsmEnablementAccountPreviewInterface interface { + + // Get the enhanced security monitoring setting for new workspaces. + // + // Gets the enhanced security monitoring setting for new workspaces. + Get(ctx context.Context, request GetEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) + + // Update the enhanced security monitoring setting for new workspaces. + // + // Updates the value of the enhanced security monitoring setting for new + // workspaces. + Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) +} + +func NewEsmEnablementAccountPreview(client *client.DatabricksClient) *EsmEnablementAccountPreviewAPI { + return &EsmEnablementAccountPreviewAPI{ + esmEnablementAccountPreviewImpl: esmEnablementAccountPreviewImpl{ + client: client, + }, + } +} + +// The enhanced security monitoring setting at the account level controls +// whether to enable the feature on new workspaces. By default, this +// account-level setting is disabled for new workspaces. After workspace +// creation, account admins can enable enhanced security monitoring individually +// for each workspace.
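+//
+// A minimal usage sketch (illustrative only; "w" is an assumed,
+// already-configured *client.DatabricksClient):
+//
+//	api := NewEsmEnablementAccountPreview(w)
+//	setting, err := api.Get(ctx, GetEsmEnablementAccountSettingRequest{})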
+type EsmEnablementAccountPreviewAPI struct { + esmEnablementAccountPreviewImpl +} + +type IpAccessListsPreviewInterface interface { + + // Create access list. + // + // Creates an IP access list for this workspace. + // + // A list can be an allow list or a block list. See the top of this file for a + // description of how the server treats allow lists and block lists at runtime. + // + // When creating or updating an IP access list: + // + // * For all allow lists and block lists combined, the API supports a maximum of + // 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + // exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. + // * If the new list would block the calling user's current IP, error 400 is + // returned with `error_code` value `INVALID_STATE`. + // + // It can take a few minutes for the changes to take effect. **Note**: Your new + // IP access list has no effect until you enable the feature. See + // :method:workspaceconf/setStatus + Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) + + // Delete access list. + // + // Deletes an IP access list, specified by its list ID. + Delete(ctx context.Context, request DeleteIpAccessListRequest) error + + // Delete access list. + // + // Deletes an IP access list, specified by its list ID. + DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error + + // Get access list. + // + // Gets an IP access list, specified by its list ID. + Get(ctx context.Context, request GetIpAccessListRequest) (*FetchIpAccessListResponse, error) + + // Get access list. + // + // Gets an IP access list, specified by its list ID. + GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*FetchIpAccessListResponse, error) + + // Get access lists. + // + // Gets all IP access lists for the specified workspace. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[IpAccessListInfo] + + // Get access lists. + // + // Gets all IP access lists for the specified workspace. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]IpAccessListInfo, error) + + // IpAccessListInfoLabelToListIdMap calls [IpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. + // + // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. + // + // Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) + + // GetByLabel calls [IpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. + // + // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. + // + // Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) + + // Replace access list. + // + // Replaces an IP access list, specified by its ID. + // + // A list can include allow lists and block lists. See the top of this file for + // a description of how the server treats allow lists and block lists at run + // time. 
When replacing an IP access list: * For all allow lists and block lists + // combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR + // counts as a single value. Attempts to exceed that number return error 400 + // with `error_code` value `QUOTA_EXCEEDED`. * If the resulting list would block + // the calling user's current IP, error 400 is returned with `error_code` value + // `INVALID_STATE`. It can take a few minutes for the changes to take effect. + // Note that your resulting IP access list has no effect until you enable the + // feature. See :method:workspaceconf/setStatus. + Replace(ctx context.Context, request ReplaceIpAccessList) error + + // Update access list. + // + // Updates an existing IP access list, specified by its ID. + // + // A list can include allow lists and block lists. See the top of this file for + // a description of how the server treats allow lists and block lists at run + // time. + // + // When updating an IP access list: + // + // * For all allow lists and block lists combined, the API supports a maximum of + // 1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to + // exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. + // * If the updated list would block the calling user's current IP, error 400 is + // returned with `error_code` value `INVALID_STATE`. + // + // It can take a few minutes for the changes to take effect. Note that your + // resulting IP access list has no effect until you enable the feature. See + // :method:workspaceconf/setStatus. + Update(ctx context.Context, request UpdateIpAccessList) error +} + +func NewIpAccessListsPreview(client *client.DatabricksClient) *IpAccessListsPreviewAPI { + return &IpAccessListsPreviewAPI{ + ipAccessListsPreviewImpl: ipAccessListsPreviewImpl{ + client: client, + }, + } +} + +// IP Access List enables admins to configure IP access lists. +// +// IP access lists affect web application access and REST API access to this +// workspace only. If the feature is disabled for a workspace, all access is +// allowed for this workspace. There is support for allow lists (inclusion) and +// block lists (exclusion). +// +// When a connection is attempted: 1. **First, all block lists are checked.** If +// the connection IP address matches any block list, the connection is rejected. +// 2. **If the connection was not rejected by block lists**, the IP address is +// compared with the allow lists. +// +// If there is at least one allow list for the workspace, the connection is +// allowed only if the IP address matches an allow list. If there are no allow +// lists for the workspace, all IP addresses are allowed. +// +// For all allow lists and block lists combined, the workspace supports a +// maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. +// +// After changes to the IP access list feature, it can take a few minutes for +// changes to take effect. +type IpAccessListsPreviewAPI struct { + ipAccessListsPreviewImpl +} + +// Delete access list. +// +// Deletes an IP access list, specified by its list ID. +func (a *IpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { + return a.ipAccessListsPreviewImpl.Delete(ctx, DeleteIpAccessListRequest{ + IpAccessListId: ipAccessListId, + }) +} + +// Get access list. +// +// Gets an IP access list, specified by its list ID. 
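+//
+// For example (the list ID is hypothetical):
+//
+//	resp, err := a.GetByIpAccessListId(ctx, "0123456789abcdef")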
+func (a *IpAccessListsPreviewAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*FetchIpAccessListResponse, error) { + return a.ipAccessListsPreviewImpl.Get(ctx, GetIpAccessListRequest{ + IpAccessListId: ipAccessListId, + }) +} + +// IpAccessListInfoLabelToListIdMap calls [IpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. +// +// Returns an error if there's more than one [IpAccessListInfo] with the same .Label. +// +// Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *IpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Label + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Label: %s", key) + } + mapping[key] = v.ListId + } + return mapping, nil +} + +// GetByLabel calls [IpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. +// +// Returns an error if there's more than one [IpAccessListInfo] with the same .Label. +// +// Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *IpAccessListsPreviewAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]IpAccessListInfo{} + for _, v := range result { + key := v.Label + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("IpAccessListInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of IpAccessListInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type NetworkConnectivityPreviewInterface interface { + + // Create a network connectivity configuration. + CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) + + // Create a private endpoint rule. + // + // Create a private endpoint rule for the specified network connectivity config + // object. Once the object is created, Databricks asynchronously provisions a + // new Azure private endpoint to your specified Azure resource. + // + // **IMPORTANT**: You must use Azure portal or other Azure tools to approve the + // private endpoint to complete the connection. To get the information of the + // private endpoint created, make a `GET` request on the new private endpoint + // rule. See [serverless private link]. + // + // [serverless private link]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security/serverless-private-link + CreatePrivateEndpointRule(ctx context.Context, request CreatePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) + + // Delete a network connectivity configuration. + // + // Deletes a network connectivity configuration. 
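+	//
+	// An illustrative call (the config ID is a placeholder; ncc is a configured
+	// NetworkConnectivityPreviewInterface):
+	//
+	//	err := ncc.DeleteNetworkConnectivityConfiguration(ctx,
+	//	    DeleteNetworkConnectivityConfigurationRequest{
+	//	        NetworkConnectivityConfigId: "ncc-id",
+	//	    })
+	//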
+ DeleteNetworkConnectivityConfiguration(ctx context.Context, request DeleteNetworkConnectivityConfigurationRequest) error + + // Delete a network connectivity configuration. + // + // Deletes a network connectivity configuration. + DeleteNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) error + + // Delete a private endpoint rule. + // + // Initiates deleting a private endpoint rule. If the connection state is + // PENDING or EXPIRED, the private endpoint is immediately deleted. Otherwise, + // the private endpoint is deactivated and will be deleted after seven days of + // deactivation. When a private endpoint is deactivated, the `deactivated` field + // is set to `true` and the private endpoint is not available to your serverless + // compute resources. + DeletePrivateEndpointRule(ctx context.Context, request DeletePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) + + // Delete a private endpoint rule. + // + // Initiates deleting a private endpoint rule. If the connection state is + // PENDING or EXPIRED, the private endpoint is immediately deleted. Otherwise, + // the private endpoint is deactivated and will be deleted after seven days of + // deactivation. When a private endpoint is deactivated, the `deactivated` field + // is set to `true` and the private endpoint is not available to your serverless + // compute resources. + DeletePrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) + + // Get a network connectivity configuration. + // + // Gets a network connectivity configuration. + GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) + + // Get a network connectivity configuration. + // + // Gets a network connectivity configuration. + GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) + + // Get a private endpoint rule. + // + // Gets the private endpoint rule. + GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) + + // Get a private endpoint rule. + // + // Gets the private endpoint rule. + GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) + + // List network connectivity configurations. + // + // Gets an array of network connectivity configurations. + // + // This method is generated by Databricks SDK Code Generator. + ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] + + // List network connectivity configurations. + // + // Gets an array of network connectivity configurations. + // + // This method is generated by Databricks SDK Code Generator. + ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) + + // List private endpoint rules. + // + // Gets an array of private endpoint rules. + // + // This method is generated by Databricks SDK Code Generator. 
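+	//
+	// Illustrative iteration over the paginated results, using the HasNext/Next
+	// contract of the SDK's listing package; the config ID is a placeholder:
+	//
+	//	it := ncc.ListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{
+	//	    NetworkConnectivityConfigId: "ncc-id",
+	//	})
+	//	for it.HasNext(ctx) {
+	//	    rule, err := it.Next(ctx)
+	//	    if err != nil {
+	//	        break
+	//	    }
+	//	    _ = rule
+	//	}
+	//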
+ ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] + + // List private endpoint rules. + // + // Gets an array of private endpoint rules. + // + // This method is generated by Databricks SDK Code Generator. + ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) + + // List private endpoint rules. + // + // Gets an array of private endpoint rules. + ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) +} + +func NewNetworkConnectivityPreview(client *client.DatabricksClient) *NetworkConnectivityPreviewAPI { + return &NetworkConnectivityPreviewAPI{ + networkConnectivityPreviewImpl: networkConnectivityPreviewImpl{ + client: client, + }, + } +} + +// These APIs provide configurations for the network connectivity of your +// workspaces for serverless compute resources. +type NetworkConnectivityPreviewAPI struct { + networkConnectivityPreviewImpl +} + +// Delete a network connectivity configuration. +// +// Deletes a network connectivity configuration. +func (a *NetworkConnectivityPreviewAPI) DeleteNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) error { + return a.networkConnectivityPreviewImpl.DeleteNetworkConnectivityConfiguration(ctx, DeleteNetworkConnectivityConfigurationRequest{ + NetworkConnectivityConfigId: networkConnectivityConfigId, + }) +} + +// Delete a private endpoint rule. +// +// Initiates deleting a private endpoint rule. If the connection state is +// PENDING or EXPIRED, the private endpoint is immediately deleted. Otherwise, +// the private endpoint is deactivated and will be deleted after seven days of +// deactivation. When a private endpoint is deactivated, the `deactivated` field +// is set to `true` and the private endpoint is not available to your serverless +// compute resources. +func (a *NetworkConnectivityPreviewAPI) DeletePrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { + return a.networkConnectivityPreviewImpl.DeletePrivateEndpointRule(ctx, DeletePrivateEndpointRuleRequest{ + NetworkConnectivityConfigId: networkConnectivityConfigId, + PrivateEndpointRuleId: privateEndpointRuleId, + }) +} + +// Get a network connectivity configuration. +// +// Gets a network connectivity configuration. +func (a *NetworkConnectivityPreviewAPI) GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) { + return a.networkConnectivityPreviewImpl.GetNetworkConnectivityConfiguration(ctx, GetNetworkConnectivityConfigurationRequest{ + NetworkConnectivityConfigId: networkConnectivityConfigId, + }) +} + +// Get a private endpoint rule. +// +// Gets the private endpoint rule. 
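+//
+// Sketch of this by-ID convenience form (both IDs are placeholders):
+//
+//	rule, err := ncc.GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(
+//	    ctx, "ncc-id", "rule-id")
+//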
+func (a *NetworkConnectivityPreviewAPI) GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { + return a.networkConnectivityPreviewImpl.GetPrivateEndpointRule(ctx, GetPrivateEndpointRuleRequest{ + NetworkConnectivityConfigId: networkConnectivityConfigId, + PrivateEndpointRuleId: privateEndpointRuleId, + }) +} + +// List private endpoint rules. +// +// Gets an array of private endpoint rules. +func (a *NetworkConnectivityPreviewAPI) ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) { + return a.networkConnectivityPreviewImpl.internalListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{ + NetworkConnectivityConfigId: networkConnectivityConfigId, + }) +} + +type NotificationDestinationsPreviewInterface interface { + + // Create a notification destination. + // + // Creates a notification destination. Requires workspace admin permissions. + Create(ctx context.Context, request CreateNotificationDestinationRequest) (*NotificationDestination, error) + + // Delete a notification destination. + // + // Deletes a notification destination. Requires workspace admin permissions. + Delete(ctx context.Context, request DeleteNotificationDestinationRequest) error + + // Delete a notification destination. + // + // Deletes a notification destination. Requires workspace admin permissions. + DeleteById(ctx context.Context, id string) error + + // Get a notification destination. + // + // Gets a notification destination. + Get(ctx context.Context, request GetNotificationDestinationRequest) (*NotificationDestination, error) + + // Get a notification destination. + // + // Gets a notification destination. + GetById(ctx context.Context, id string) (*NotificationDestination, error) + + // List notification destinations. + // + // Lists notification destinations. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] + + // List notification destinations. + // + // Lists notification destinations. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) + + // Update a notification destination. + // + // Updates a notification destination. Requires workspace admin permissions. At + // least one field is required in the request body. + Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) +} + +func NewNotificationDestinationsPreview(client *client.DatabricksClient) *NotificationDestinationsPreviewAPI { + return &NotificationDestinationsPreviewAPI{ + notificationDestinationsPreviewImpl: notificationDestinationsPreviewImpl{ + client: client, + }, + } +} + +// The notification destinations API lets you programmatically manage a +// workspace's notification destinations. Notification destinations are used to +// send notifications for query alerts and jobs to destinations outside of +// Databricks. Only workspace admins can create, update, and delete notification +// destinations. +type NotificationDestinationsPreviewAPI struct { + notificationDestinationsPreviewImpl +} + +// Delete a notification destination. 
+//
+// Deletes a notification destination. Requires workspace admin permissions.
+func (a *NotificationDestinationsPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.notificationDestinationsPreviewImpl.Delete(ctx, DeleteNotificationDestinationRequest{
+		Id: id,
+	})
+}
+
+// Get a notification destination.
+//
+// Gets a notification destination.
+func (a *NotificationDestinationsPreviewAPI) GetById(ctx context.Context, id string) (*NotificationDestination, error) {
+	return a.notificationDestinationsPreviewImpl.Get(ctx, GetNotificationDestinationRequest{
+		Id: id,
+	})
+}
+
+type PersonalComputePreviewInterface interface {
+
+	// Delete Personal Compute setting.
+	//
+	// Reverts the Personal Compute setting value to its default (ON).
+	Delete(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error)
+
+	// Get Personal Compute setting.
+	//
+	// Gets the value of the Personal Compute setting.
+	Get(ctx context.Context, request GetPersonalComputeSettingRequest) (*PersonalComputeSetting, error)
+
+	// Update Personal Compute setting.
+	//
+	// Updates the value of the Personal Compute setting.
+	Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error)
+}
+
+func NewPersonalComputePreview(client *client.DatabricksClient) *PersonalComputePreviewAPI {
+	return &PersonalComputePreviewAPI{
+		personalComputePreviewImpl: personalComputePreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The Personal Compute enablement setting lets you control which users can use
+// the Personal Compute default policy to create compute resources. By default
+// all users in all workspaces have access (ON), but you can change the setting
+// to instead let individual workspaces configure access control (DELEGATE).
+//
+// There is only one instance of this setting per account. Since this setting
+// has a default value, it is present on all accounts even if it has never been
+// explicitly set on a given account. Deletion reverts the setting to its
+// default value.
+type PersonalComputePreviewAPI struct {
+	personalComputePreviewImpl
+}
+
+type RestrictWorkspaceAdminsPreviewInterface interface {
+
+	// Delete the restrict workspace admins setting.
+	//
+	// Reverts the restrict workspace admins setting status for the workspace. A
+	// fresh etag needs to be provided in `DELETE` requests (as a query parameter).
+	// The etag can be retrieved by making a `GET` request before the `DELETE`
+	// request. If the setting is updated/deleted concurrently, `DELETE` fails with
+	// 409 and the request must be retried by using the fresh etag in the 409
+	// response.
+	Delete(ctx context.Context, request DeleteRestrictWorkspaceAdminsSettingRequest) (*DeleteRestrictWorkspaceAdminsSettingResponse, error)
+
+	// Get the restrict workspace admins setting.
+	//
+	// Gets the restrict workspace admins setting.
+	Get(ctx context.Context, request GetRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error)
+
+	// Update the restrict workspace admins setting.
+	//
+	// Updates the restrict workspace admins setting for the workspace. A fresh etag
+	// needs to be provided in `PATCH` requests (as part of the setting field). The
+	// etag can be retrieved by making a `GET` request before the `PATCH` request. If
+	// the setting is updated concurrently, `PATCH` fails with 409 and the request
+	// must be retried by using the fresh etag in the 409 response.
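+	//
+	// A sketch of the get-then-patch etag flow described above (field and
+	// constant names are assumed from this package's model; error handling and
+	// the 409 retry are elided):
+	//
+	//	current, _ := api.Get(ctx, GetRestrictWorkspaceAdminsSettingRequest{})
+	//	updated, err := api.Update(ctx, UpdateRestrictWorkspaceAdminsSettingRequest{
+	//	    AllowMissing: true,
+	//	    FieldMask:    "restrict_workspace_admins.status",
+	//	    Setting: RestrictWorkspaceAdminsSetting{
+	//	        Etag: current.Etag,
+	//	        RestrictWorkspaceAdmins: RestrictWorkspaceAdminsMessage{
+	//	            Status: RestrictWorkspaceAdminsMessageStatusRestrictTokensAndJobRunAs,
+	//	        },
+	//	    },
+	//	})
+	//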
+	Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error)
+}
+
+func NewRestrictWorkspaceAdminsPreview(client *client.DatabricksClient) *RestrictWorkspaceAdminsPreviewAPI {
+	return &RestrictWorkspaceAdminsPreviewAPI{
+		restrictWorkspaceAdminsPreviewImpl: restrictWorkspaceAdminsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The Restrict Workspace Admins setting lets you control the capabilities of
+// workspace admins. With the setting status set to ALLOW_ALL, workspace admins
+// can create service principal personal access tokens on behalf of any service
+// principal in their workspace, change a job owner to any user in their
+// workspace, and change the job run_as setting to any user in their workspace
+// or to a service principal on which they have the Service Principal User
+// role. With the setting status set to RESTRICT_TOKENS_AND_JOB_RUN_AS,
+// workspace admins can only create personal access tokens on behalf of service
+// principals on which they have the Service Principal User role, can only
+// change a job owner to themselves, and can only change the job run_as setting
+// to themselves or to a service principal on which they have the Service
+// Principal User role.
+type RestrictWorkspaceAdminsPreviewAPI struct {
+	restrictWorkspaceAdminsPreviewImpl
+}
+
+type SettingsInterface interface {
+}
+
+func NewSettings(client *client.DatabricksClient) *SettingsAPI {
+	return &SettingsAPI{
+		settingsImpl: settingsImpl{
+			client: client,
+		},
+	}
+}
+
+// Workspace Settings API allows users to manage settings at the workspace
+// level.
+type SettingsAPI struct {
+	settingsImpl
+}
+
+type SettingsPreviewInterface interface {
+}
+
+func NewSettingsPreview(client *client.DatabricksClient) *SettingsPreviewAPI {
+	return &SettingsPreviewAPI{
+		settingsPreviewImpl: settingsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Workspace Settings API allows users to manage settings at the workspace
+// level.
+type SettingsPreviewAPI struct {
+	settingsPreviewImpl
+}
+
+type TokenManagementPreviewInterface interface {
+
+	// Create on-behalf token.
+	//
+	// Creates a token on behalf of a service principal.
+	CreateOboToken(ctx context.Context, request CreateOboTokenRequest) (*CreateOboTokenResponse, error)
+
+	// Delete a token.
+	//
+	// Deletes a token, specified by its ID.
+	Delete(ctx context.Context, request DeleteTokenManagementRequest) error
+
+	// Delete a token.
+	//
+	// Deletes a token, specified by its ID.
+	DeleteByTokenId(ctx context.Context, tokenId string) error
+
+	// Get token info.
+	//
+	// Gets information about a token, specified by its ID.
+	Get(ctx context.Context, request GetTokenManagementRequest) (*GetTokenResponse, error)
+
+	// Get token info.
+	//
+	// Gets information about a token, specified by its ID.
+	GetByTokenId(ctx context.Context, tokenId string) (*GetTokenResponse, error)
+
+	// Get token permission levels.
+	//
+	// Gets the permission levels that a user can have on an object.
+	GetPermissionLevels(ctx context.Context) (*GetTokenPermissionLevelsResponse, error)
+
+	// Get token permissions.
+	//
+	// Gets the permissions of all tokens. Tokens can inherit permissions from their
+	// root object.
+	GetPermissions(ctx context.Context) (*TokenPermissions, error)
+
+	// List all tokens.
+	//
+	// Lists all tokens associated with the specified workspace or user.
+	//
+	// This method is generated by Databricks SDK Code Generator.
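+	//
+	// An illustrative call (the filter field is assumed from this package's
+	// ListTokenManagementRequest model; the username is hypothetical):
+	//
+	//	tokens, err := tm.ListAll(ctx, ListTokenManagementRequest{
+	//	    CreatedByUsername: "someone@example.com",
+	//	})
+	//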
+ List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] + + // List all tokens. + // + // Lists all tokens associated with the specified workspace or user. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) + + // TokenInfoCommentToTokenIdMap calls [TokenManagementPreviewAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. + // + // Returns an error if there's more than one [TokenInfo] with the same .Comment. + // + // Note: All [TokenInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + TokenInfoCommentToTokenIdMap(ctx context.Context, request ListTokenManagementRequest) (map[string]string, error) + + // GetByComment calls [TokenManagementPreviewAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. + // + // Returns an error if there's more than one [TokenInfo] with the same .Comment. + // + // Note: All [TokenInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByComment(ctx context.Context, name string) (*TokenInfo, error) + + // Set token permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) + + // Update token permissions. + // + // Updates the permissions on all tokens. Tokens can inherit permissions from + // their root object. + UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) +} + +func NewTokenManagementPreview(client *client.DatabricksClient) *TokenManagementPreviewAPI { + return &TokenManagementPreviewAPI{ + tokenManagementPreviewImpl: tokenManagementPreviewImpl{ + client: client, + }, + } +} + +// Enables administrators to get all tokens and delete tokens for other users. +// Admins can either get every token, get a specific token by ID, or get all +// tokens for a particular user. +type TokenManagementPreviewAPI struct { + tokenManagementPreviewImpl +} + +// Delete a token. +// +// Deletes a token, specified by its ID. +func (a *TokenManagementPreviewAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { + return a.tokenManagementPreviewImpl.Delete(ctx, DeleteTokenManagementRequest{ + TokenId: tokenId, + }) +} + +// Get token info. +// +// Gets information about a token, specified by its ID. +func (a *TokenManagementPreviewAPI) GetByTokenId(ctx context.Context, tokenId string) (*GetTokenResponse, error) { + return a.tokenManagementPreviewImpl.Get(ctx, GetTokenManagementRequest{ + TokenId: tokenId, + }) +} + +// TokenInfoCommentToTokenIdMap calls [TokenManagementPreviewAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. +// +// Returns an error if there's more than one [TokenInfo] with the same .Comment. +// +// Note: All [TokenInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
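+//
+// Sketch of resolving a token ID by its comment (the comment value is
+// hypothetical):
+//
+//	byComment, err := a.TokenInfoCommentToTokenIdMap(ctx, ListTokenManagementRequest{})
+//	if err == nil {
+//	    tokenId := byComment["ci-pipeline"]
+//	    _ = tokenId
+//	}
+//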
+func (a *TokenManagementPreviewAPI) TokenInfoCommentToTokenIdMap(ctx context.Context, request ListTokenManagementRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Comment + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Comment: %s", key) + } + mapping[key] = v.TokenId + } + return mapping, nil +} + +// GetByComment calls [TokenManagementPreviewAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. +// +// Returns an error if there's more than one [TokenInfo] with the same .Comment. +// +// Note: All [TokenInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *TokenManagementPreviewAPI) GetByComment(ctx context.Context, name string) (*TokenInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListTokenManagementRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]TokenInfo{} + for _, v := range result { + key := v.Comment + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("TokenInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of TokenInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type TokensPreviewInterface interface { + + // Create a user token. + // + // Creates and returns a token for a user. If this call is made through token + // authentication, it creates a token with the same client ID as the + // authenticated token. If the user's token quota is exceeded, this call returns + // an error **QUOTA_EXCEEDED**. + Create(ctx context.Context, request CreateTokenRequest) (*CreateTokenResponse, error) + + // Revoke token. + // + // Revokes an access token. + // + // If a token with the specified ID is not valid, this call returns an error + // **RESOURCE_DOES_NOT_EXIST**. + Delete(ctx context.Context, request RevokeTokenRequest) error + + // Revoke token. + // + // Revokes an access token. + // + // If a token with the specified ID is not valid, this call returns an error + // **RESOURCE_DOES_NOT_EXIST**. + DeleteByTokenId(ctx context.Context, tokenId string) error + + // List tokens. + // + // Lists all the valid tokens for a user-workspace pair. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[PublicTokenInfo] + + // List tokens. + // + // Lists all the valid tokens for a user-workspace pair. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]PublicTokenInfo, error) + + // PublicTokenInfoCommentToTokenIdMap calls [TokensPreviewAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. + // + // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. + // + // Note: All [PublicTokenInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
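+	//
+	// Because comments are not guaranteed unique, this call fails if two tokens
+	// share a comment; an illustrative defensive use:
+	//
+	//	m, err := tokens.PublicTokenInfoCommentToTokenIdMap(ctx)
+	//	if err != nil {
+	//	    // fall back to ListAll and disambiguate manually
+	//	}
+	//	_ = m
+	//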
+ PublicTokenInfoCommentToTokenIdMap(ctx context.Context) (map[string]string, error) + + // GetByComment calls [TokensPreviewAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. + // + // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. + // + // Note: All [PublicTokenInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByComment(ctx context.Context, name string) (*PublicTokenInfo, error) +} + +func NewTokensPreview(client *client.DatabricksClient) *TokensPreviewAPI { + return &TokensPreviewAPI{ + tokensPreviewImpl: tokensPreviewImpl{ + client: client, + }, + } +} + +// The Token API allows you to create, list, and revoke tokens that can be used +// to authenticate and access Databricks REST APIs. +type TokensPreviewAPI struct { + tokensPreviewImpl +} + +// Revoke token. +// +// Revokes an access token. +// +// If a token with the specified ID is not valid, this call returns an error +// **RESOURCE_DOES_NOT_EXIST**. +func (a *TokensPreviewAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { + return a.tokensPreviewImpl.Delete(ctx, RevokeTokenRequest{ + TokenId: tokenId, + }) +} + +// PublicTokenInfoCommentToTokenIdMap calls [TokensPreviewAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. +// +// Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. +// +// Note: All [PublicTokenInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *TokensPreviewAPI) PublicTokenInfoCommentToTokenIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Comment + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Comment: %s", key) + } + mapping[key] = v.TokenId + } + return mapping, nil +} + +// GetByComment calls [TokensPreviewAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. +// +// Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. +// +// Note: All [PublicTokenInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *TokensPreviewAPI) GetByComment(ctx context.Context, name string) (*PublicTokenInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]PublicTokenInfo{} + for _, v := range result { + key := v.Comment + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("PublicTokenInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of PublicTokenInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type WorkspaceConfPreviewInterface interface { + + // Check configuration status. + // + // Gets the configuration status for a workspace. + GetStatus(ctx context.Context, request GetStatusRequest) (*map[string]string, error) + + // Enable/disable features. 
+ // + // Sets the configuration status for a workspace, including enabling or + // disabling it. + SetStatus(ctx context.Context, request WorkspaceConf) error +} + +func NewWorkspaceConfPreview(client *client.DatabricksClient) *WorkspaceConfPreviewAPI { + return &WorkspaceConfPreviewAPI{ + workspaceConfPreviewImpl: workspaceConfPreviewImpl{ + client: client, + }, + } +} + +// This API allows updating known workspace settings for advanced users. +type WorkspaceConfPreviewAPI struct { + workspaceConfPreviewImpl +} diff --git a/settings/v2preview/client.go b/settings/v2preview/client.go new file mode 100755 index 000000000..add625788 --- /dev/null +++ b/settings/v2preview/client.go @@ -0,0 +1,859 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package settingspreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type AccountIpAccessListsPreviewClient struct { + AccountIpAccessListsPreviewInterface + + Config *config.Config +} + +func NewAccountIpAccessListsPreviewClient(cfg *config.Config) (*AccountIpAccessListsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountIpAccessListsPreviewClient{ + Config: cfg, + AccountIpAccessListsPreviewInterface: NewAccountIpAccessListsPreview(apiClient), + }, nil +} + +type AccountSettingsClient struct { + AccountSettingsInterface + + Config *config.Config +} + +func NewAccountSettingsClient(cfg *config.Config) (*AccountSettingsClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountSettingsClient{ + Config: cfg, + AccountSettingsInterface: NewAccountSettings(apiClient), + }, nil +} + +type AccountSettingsPreviewClient struct { + AccountSettingsPreviewInterface + + Config *config.Config +} + +func NewAccountSettingsPreviewClient(cfg *config.Config) (*AccountSettingsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &AccountSettingsPreviewClient{ + Config: cfg, + AccountSettingsPreviewInterface: NewAccountSettingsPreview(apiClient), + }, nil +} + +type AibiDashboardEmbeddingAccessPolicyPreviewClient struct { + AibiDashboardEmbeddingAccessPolicyPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAibiDashboardEmbeddingAccessPolicyPreviewClient(cfg *config.Config) (*AibiDashboardEmbeddingAccessPolicyPreviewClient, error) { + if cfg 
== nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AibiDashboardEmbeddingAccessPolicyPreviewClient{ + Config: cfg, + apiClient: apiClient, + AibiDashboardEmbeddingAccessPolicyPreviewInterface: NewAibiDashboardEmbeddingAccessPolicyPreview(databricksClient), + }, nil +} + +type AibiDashboardEmbeddingApprovedDomainsPreviewClient struct { + AibiDashboardEmbeddingApprovedDomainsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAibiDashboardEmbeddingApprovedDomainsPreviewClient(cfg *config.Config) (*AibiDashboardEmbeddingApprovedDomainsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AibiDashboardEmbeddingApprovedDomainsPreviewClient{ + Config: cfg, + apiClient: apiClient, + AibiDashboardEmbeddingApprovedDomainsPreviewInterface: NewAibiDashboardEmbeddingApprovedDomainsPreview(databricksClient), + }, nil +} + +type AutomaticClusterUpdatePreviewClient struct { + AutomaticClusterUpdatePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAutomaticClusterUpdatePreviewClient(cfg *config.Config) (*AutomaticClusterUpdatePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AutomaticClusterUpdatePreviewClient{ + Config: cfg, + apiClient: apiClient, + AutomaticClusterUpdatePreviewInterface: NewAutomaticClusterUpdatePreview(databricksClient), + }, nil +} + +type ComplianceSecurityProfilePreviewClient struct { + ComplianceSecurityProfilePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewComplianceSecurityProfilePreviewClient(cfg *config.Config) (*ComplianceSecurityProfilePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ComplianceSecurityProfilePreviewClient{ + Config: cfg, + apiClient: apiClient, + ComplianceSecurityProfilePreviewInterface: 
NewComplianceSecurityProfilePreview(databricksClient), + }, nil +} + +type CredentialsManagerPreviewClient struct { + CredentialsManagerPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewCredentialsManagerPreviewClient(cfg *config.Config) (*CredentialsManagerPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &CredentialsManagerPreviewClient{ + Config: cfg, + apiClient: apiClient, + CredentialsManagerPreviewInterface: NewCredentialsManagerPreview(databricksClient), + }, nil +} + +type CspEnablementAccountPreviewClient struct { + CspEnablementAccountPreviewInterface + + Config *config.Config +} + +func NewCspEnablementAccountPreviewClient(cfg *config.Config) (*CspEnablementAccountPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &CspEnablementAccountPreviewClient{ + Config: cfg, + CspEnablementAccountPreviewInterface: NewCspEnablementAccountPreview(apiClient), + }, nil +} + +type DefaultNamespacePreviewClient struct { + DefaultNamespacePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDefaultNamespacePreviewClient(cfg *config.Config) (*DefaultNamespacePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DefaultNamespacePreviewClient{ + Config: cfg, + apiClient: apiClient, + DefaultNamespacePreviewInterface: NewDefaultNamespacePreview(databricksClient), + }, nil +} + +type DisableLegacyAccessPreviewClient struct { + DisableLegacyAccessPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDisableLegacyAccessPreviewClient(cfg *config.Config) (*DisableLegacyAccessPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DisableLegacyAccessPreviewClient{ + Config: cfg, + apiClient: apiClient, + DisableLegacyAccessPreviewInterface: NewDisableLegacyAccessPreview(databricksClient), + }, nil +} + +type 
DisableLegacyDbfsPreviewClient struct { + DisableLegacyDbfsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDisableLegacyDbfsPreviewClient(cfg *config.Config) (*DisableLegacyDbfsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DisableLegacyDbfsPreviewClient{ + Config: cfg, + apiClient: apiClient, + DisableLegacyDbfsPreviewInterface: NewDisableLegacyDbfsPreview(databricksClient), + }, nil +} + +type DisableLegacyFeaturesPreviewClient struct { + DisableLegacyFeaturesPreviewInterface + + Config *config.Config +} + +func NewDisableLegacyFeaturesPreviewClient(cfg *config.Config) (*DisableLegacyFeaturesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &DisableLegacyFeaturesPreviewClient{ + Config: cfg, + DisableLegacyFeaturesPreviewInterface: NewDisableLegacyFeaturesPreview(apiClient), + }, nil +} + +type EnableIpAccessListsPreviewClient struct { + EnableIpAccessListsPreviewInterface + + Config *config.Config +} + +func NewEnableIpAccessListsPreviewClient(cfg *config.Config) (*EnableIpAccessListsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &EnableIpAccessListsPreviewClient{ + Config: cfg, + EnableIpAccessListsPreviewInterface: NewEnableIpAccessListsPreview(apiClient), + }, nil +} + +type EnhancedSecurityMonitoringPreviewClient struct { + EnhancedSecurityMonitoringPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewEnhancedSecurityMonitoringPreviewClient(cfg *config.Config) (*EnhancedSecurityMonitoringPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &EnhancedSecurityMonitoringPreviewClient{ + Config: cfg, + apiClient: apiClient, + EnhancedSecurityMonitoringPreviewInterface: NewEnhancedSecurityMonitoringPreview(databricksClient), + }, nil +} + +type EsmEnablementAccountPreviewClient struct { + EsmEnablementAccountPreviewInterface + + Config *config.Config +} + +func 
NewEsmEnablementAccountPreviewClient(cfg *config.Config) (*EsmEnablementAccountPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &EsmEnablementAccountPreviewClient{ + Config: cfg, + EsmEnablementAccountPreviewInterface: NewEsmEnablementAccountPreview(apiClient), + }, nil +} + +type IpAccessListsPreviewClient struct { + IpAccessListsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewIpAccessListsPreviewClient(cfg *config.Config) (*IpAccessListsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &IpAccessListsPreviewClient{ + Config: cfg, + apiClient: apiClient, + IpAccessListsPreviewInterface: NewIpAccessListsPreview(databricksClient), + }, nil +} + +type NetworkConnectivityPreviewClient struct { + NetworkConnectivityPreviewInterface + + Config *config.Config +} + +func NewNetworkConnectivityPreviewClient(cfg *config.Config) (*NetworkConnectivityPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &NetworkConnectivityPreviewClient{ + Config: cfg, + NetworkConnectivityPreviewInterface: NewNetworkConnectivityPreview(apiClient), + }, nil +} + +type NotificationDestinationsPreviewClient struct { + NotificationDestinationsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewNotificationDestinationsPreviewClient(cfg *config.Config) (*NotificationDestinationsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &NotificationDestinationsPreviewClient{ + Config: cfg, + apiClient: apiClient, + NotificationDestinationsPreviewInterface: NewNotificationDestinationsPreview(databricksClient), + }, nil +} + +type PersonalComputePreviewClient struct { + PersonalComputePreviewInterface + + Config *config.Config +} + +func NewPersonalComputePreviewClient(cfg *config.Config) (*PersonalComputePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } 
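+
+	// Personal Compute is an account-level setting, so this constructor only
+	// accepts an account-scoped configuration (account ID plus account host),
+	// as checked below.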
+ + if cfg.AccountID == "" || !cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") + } + apiClient, err := client.New(cfg) + if err != nil { + return nil, err + } + + return &PersonalComputePreviewClient{ + Config: cfg, + PersonalComputePreviewInterface: NewPersonalComputePreview(apiClient), + }, nil +} + +type RestrictWorkspaceAdminsPreviewClient struct { + RestrictWorkspaceAdminsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewRestrictWorkspaceAdminsPreviewClient(cfg *config.Config) (*RestrictWorkspaceAdminsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &RestrictWorkspaceAdminsPreviewClient{ + Config: cfg, + apiClient: apiClient, + RestrictWorkspaceAdminsPreviewInterface: NewRestrictWorkspaceAdminsPreview(databricksClient), + }, nil +} + +type SettingsClient struct { + SettingsInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSettingsClient(cfg *config.Config) (*SettingsClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SettingsClient{ + Config: cfg, + apiClient: apiClient, + SettingsInterface: NewSettings(databricksClient), + }, nil +} + +type SettingsPreviewClient struct { + SettingsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSettingsPreviewClient(cfg *config.Config) (*SettingsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SettingsPreviewClient{ + Config: cfg, + apiClient: apiClient, + SettingsPreviewInterface: NewSettingsPreview(databricksClient), + }, nil +} + +type TokenManagementPreviewClient struct { + TokenManagementPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewTokenManagementPreviewClient(cfg *config.Config) (*TokenManagementPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + 
return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &TokenManagementPreviewClient{ + Config: cfg, + apiClient: apiClient, + TokenManagementPreviewInterface: NewTokenManagementPreview(databricksClient), + }, nil +} + +type TokensPreviewClient struct { + TokensPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewTokensPreviewClient(cfg *config.Config) (*TokensPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &TokensPreviewClient{ + Config: cfg, + apiClient: apiClient, + TokensPreviewInterface: NewTokensPreview(databricksClient), + }, nil +} + +type WorkspaceConfPreviewClient struct { + WorkspaceConfPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewWorkspaceConfPreviewClient(cfg *config.Config) (*WorkspaceConfPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &WorkspaceConfPreviewClient{ + Config: cfg, + apiClient: apiClient, + WorkspaceConfPreviewInterface: NewWorkspaceConfPreview(databricksClient), + }, nil +} diff --git a/settings/v2preview/impl.go b/settings/v2preview/impl.go new file mode 100755 index 000000000..35d75cf7f --- /dev/null +++ b/settings/v2preview/impl.go @@ -0,0 +1,1150 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package settingspreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just AccountIpAccessListsPreview API methods +type accountIpAccessListsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *accountIpAccessListsPreviewImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { + var createIpAccessListResponse CreateIpAccessListResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createIpAccessListResponse) + return &createIpAccessListResponse, err +} + +func (a *accountIpAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteAccountIpAccessListRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *accountIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAccountIpAccessListRequest) (*GetIpAccessListResponse, error) { + var getIpAccessListResponse GetIpAccessListResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getIpAccessListResponse) + return &getIpAccessListResponse, err +} + +// Get access lists. +// +// Gets all IP access lists for the specified account. +func (a *accountIpAccessListsPreviewImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*GetIpAccessListsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *GetIpAccessListsResponse) []IpAccessListInfo { + return resp.IpAccessLists + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get access lists. +// +// Gets all IP access lists for the specified account. 
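+//
+// Illustrative eager fetch (every page is loaded into memory before
+// returning; the variable names are hypothetical):
+//
+//	all, err := a.ListAll(ctx)
+//	if err == nil {
+//	    fmt.Println(len(all), "access lists")
+//	}
+//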
+func (a *accountIpAccessListsPreviewImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[IpAccessListInfo](ctx, iterator) +} +func (a *accountIpAccessListsPreviewImpl) internalList(ctx context.Context) (*GetIpAccessListsResponse, error) { + var getIpAccessListsResponse GetIpAccessListsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getIpAccessListsResponse) + return &getIpAccessListsResponse, err +} + +func (a *accountIpAccessListsPreviewImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { + var replaceResponse ReplaceResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) + return err +} + +func (a *accountIpAccessListsPreviewImpl) Update(ctx context.Context, request UpdateIpAccessList) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just AccountSettings API methods +type accountSettingsImpl struct { + client *client.DatabricksClient +} + +// unexported type that holds implementations of just AccountSettingsPreview API methods +type accountSettingsPreviewImpl struct { + client *client.DatabricksClient +} + +// unexported type that holds implementations of just AibiDashboardEmbeddingAccessPolicyPreview API methods +type aibiDashboardEmbeddingAccessPolicyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) (*DeleteAibiDashboardEmbeddingAccessPolicySettingResponse, error) { + var deleteAibiDashboardEmbeddingAccessPolicySettingResponse DeleteAibiDashboardEmbeddingAccessPolicySettingResponse + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAibiDashboardEmbeddingAccessPolicySettingResponse) + return &deleteAibiDashboardEmbeddingAccessPolicySettingResponse, err +} + +func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { + var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = 
"application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aibiDashboardEmbeddingAccessPolicySetting) + return &aibiDashboardEmbeddingAccessPolicySetting, err +} + +func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { + var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &aibiDashboardEmbeddingAccessPolicySetting) + return &aibiDashboardEmbeddingAccessPolicySetting, err +} + +// unexported type that holds implementations of just AibiDashboardEmbeddingApprovedDomainsPreview API methods +type aibiDashboardEmbeddingApprovedDomainsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, error) { + var deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) + return &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, err +} + +func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { + var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + return &aibiDashboardEmbeddingApprovedDomainsSetting, err +} + +func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { + var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting + path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + return &aibiDashboardEmbeddingApprovedDomainsSetting, err +} + +// unexported type that holds implementations of just AutomaticClusterUpdatePreview API methods +type automaticClusterUpdatePreviewImpl struct { + client 
*client.DatabricksClient +} + +func (a *automaticClusterUpdatePreviewImpl) Get(ctx context.Context, request GetAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { + var automaticClusterUpdateSetting AutomaticClusterUpdateSetting + path := "/api/2.0preview/settings/types/automatic_cluster_update/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &automaticClusterUpdateSetting) + return &automaticClusterUpdateSetting, err +} + +func (a *automaticClusterUpdatePreviewImpl) Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { + var automaticClusterUpdateSetting AutomaticClusterUpdateSetting + path := "/api/2.0preview/settings/types/automatic_cluster_update/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &automaticClusterUpdateSetting) + return &automaticClusterUpdateSetting, err +} + +// unexported type that holds implementations of just ComplianceSecurityProfilePreview API methods +type complianceSecurityProfilePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *complianceSecurityProfilePreviewImpl) Get(ctx context.Context, request GetComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { + var complianceSecurityProfileSetting ComplianceSecurityProfileSetting + path := "/api/2.0preview/settings/types/shield_csp_enablement_ws_db/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &complianceSecurityProfileSetting) + return &complianceSecurityProfileSetting, err +} + +func (a *complianceSecurityProfilePreviewImpl) Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { + var complianceSecurityProfileSetting ComplianceSecurityProfileSetting + path := "/api/2.0preview/settings/types/shield_csp_enablement_ws_db/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &complianceSecurityProfileSetting) + return &complianceSecurityProfileSetting, err +} + +// unexported type that holds implementations of just CredentialsManagerPreview API methods +type credentialsManagerPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *credentialsManagerPreviewImpl) ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) { + var exchangeTokenResponse ExchangeTokenResponse + path := "/api/2.0preview/credentials-manager/exchange-tokens/token" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &exchangeTokenResponse) + return &exchangeTokenResponse, err +} + +// unexported type that holds implementations of just CspEnablementAccountPreview 
API methods +type cspEnablementAccountPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *cspEnablementAccountPreviewImpl) Get(ctx context.Context, request GetCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { + var cspEnablementAccountSetting CspEnablementAccountSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cspEnablementAccountSetting) + return &cspEnablementAccountSetting, err +} + +func (a *cspEnablementAccountPreviewImpl) Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { + var cspEnablementAccountSetting CspEnablementAccountSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &cspEnablementAccountSetting) + return &cspEnablementAccountSetting, err +} + +// unexported type that holds implementations of just DefaultNamespacePreview API methods +type defaultNamespacePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *defaultNamespacePreviewImpl) Delete(ctx context.Context, request DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error) { + var deleteDefaultNamespaceSettingResponse DeleteDefaultNamespaceSettingResponse + path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDefaultNamespaceSettingResponse) + return &deleteDefaultNamespaceSettingResponse, err +} + +func (a *defaultNamespacePreviewImpl) Get(ctx context.Context, request GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { + var defaultNamespaceSetting DefaultNamespaceSetting + path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &defaultNamespaceSetting) + return &defaultNamespaceSetting, err +} + +func (a *defaultNamespacePreviewImpl) Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { + var defaultNamespaceSetting DefaultNamespaceSetting + path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &defaultNamespaceSetting) + return &defaultNamespaceSetting, err +} + +// unexported type that holds implementations of just DisableLegacyAccessPreview API methods +type disableLegacyAccessPreviewImpl struct { + client *client.DatabricksClient +} + +func (a 
*disableLegacyAccessPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) { + var deleteDisableLegacyAccessResponse DeleteDisableLegacyAccessResponse + path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyAccessResponse) + return &deleteDisableLegacyAccessResponse, err +} + +func (a *disableLegacyAccessPreviewImpl) Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { + var disableLegacyAccess DisableLegacyAccess + path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyAccess) + return &disableLegacyAccess, err +} + +func (a *disableLegacyAccessPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { + var disableLegacyAccess DisableLegacyAccess + path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyAccess) + return &disableLegacyAccess, err +} + +// unexported type that holds implementations of just DisableLegacyDbfsPreview API methods +type disableLegacyDbfsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *disableLegacyDbfsPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyDbfsRequest) (*DeleteDisableLegacyDbfsResponse, error) { + var deleteDisableLegacyDbfsResponse DeleteDisableLegacyDbfsResponse + path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyDbfsResponse) + return &deleteDisableLegacyDbfsResponse, err +} + +func (a *disableLegacyDbfsPreviewImpl) Get(ctx context.Context, request GetDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { + var disableLegacyDbfs DisableLegacyDbfs + path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyDbfs) + return &disableLegacyDbfs, err +} + +func (a *disableLegacyDbfsPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { + var disableLegacyDbfs DisableLegacyDbfs + path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyDbfs) + return &disableLegacyDbfs, err +} + +// unexported type that holds implementations 
of just DisableLegacyFeaturesPreview API methods +type disableLegacyFeaturesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *disableLegacyFeaturesPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) { + var deleteDisableLegacyFeaturesResponse DeleteDisableLegacyFeaturesResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyFeaturesResponse) + return &deleteDisableLegacyFeaturesResponse, err +} + +func (a *disableLegacyFeaturesPreviewImpl) Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { + var disableLegacyFeatures DisableLegacyFeatures + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyFeatures) + return &disableLegacyFeatures, err +} + +func (a *disableLegacyFeaturesPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { + var disableLegacyFeatures DisableLegacyFeatures + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyFeatures) + return &disableLegacyFeatures, err +} + +// unexported type that holds implementations of just EnableIpAccessListsPreview API methods +type enableIpAccessListsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *enableIpAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) { + var deleteAccountIpAccessEnableResponse DeleteAccountIpAccessEnableResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAccountIpAccessEnableResponse) + return &deleteAccountIpAccessEnableResponse, err +} + +func (a *enableIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { + var accountIpAccessEnable AccountIpAccessEnable + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountIpAccessEnable) + return &accountIpAccessEnable, err +} + +func (a *enableIpAccessListsPreviewImpl) Update(ctx context.Context, 
request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { + var accountIpAccessEnable AccountIpAccessEnable + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &accountIpAccessEnable) + return &accountIpAccessEnable, err +} + +// unexported type that holds implementations of just EnhancedSecurityMonitoringPreview API methods +type enhancedSecurityMonitoringPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *enhancedSecurityMonitoringPreviewImpl) Get(ctx context.Context, request GetEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { + var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting + path := "/api/2.0preview/settings/types/shield_esm_enablement_ws_db/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &enhancedSecurityMonitoringSetting) + return &enhancedSecurityMonitoringSetting, err +} + +func (a *enhancedSecurityMonitoringPreviewImpl) Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { + var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting + path := "/api/2.0preview/settings/types/shield_esm_enablement_ws_db/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enhancedSecurityMonitoringSetting) + return &enhancedSecurityMonitoringSetting, err +} + +// unexported type that holds implementations of just EsmEnablementAccountPreview API methods +type esmEnablementAccountPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *esmEnablementAccountPreviewImpl) Get(ctx context.Context, request GetEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { + var esmEnablementAccountSetting EsmEnablementAccountSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &esmEnablementAccountSetting) + return &esmEnablementAccountSetting, err +} + +func (a *esmEnablementAccountPreviewImpl) Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { + var esmEnablementAccountSetting EsmEnablementAccountSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &esmEnablementAccountSetting) + return &esmEnablementAccountSetting, err +} + +// 
unexported type that holds implementations of just IpAccessListsPreview API methods +type ipAccessListsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *ipAccessListsPreviewImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { + var createIpAccessListResponse CreateIpAccessListResponse + path := "/api/2.0preview/ip-access-lists" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createIpAccessListResponse) + return &createIpAccessListResponse, err +} + +func (a *ipAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteIpAccessListRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *ipAccessListsPreviewImpl) Get(ctx context.Context, request GetIpAccessListRequest) (*FetchIpAccessListResponse, error) { + var fetchIpAccessListResponse FetchIpAccessListResponse + path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &fetchIpAccessListResponse) + return &fetchIpAccessListResponse, err +} + +// Get access lists. +// +// Gets all IP access lists for the specified workspace. +func (a *ipAccessListsPreviewImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListIpAccessListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListIpAccessListResponse) []IpAccessListInfo { + return resp.IpAccessLists + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get access lists. +// +// Gets all IP access lists for the specified workspace. 
+func (a *ipAccessListsPreviewImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[IpAccessListInfo](ctx, iterator) +} +func (a *ipAccessListsPreviewImpl) internalList(ctx context.Context) (*ListIpAccessListResponse, error) { + var listIpAccessListResponse ListIpAccessListResponse + path := "/api/2.0preview/ip-access-lists" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listIpAccessListResponse) + return &listIpAccessListResponse, err +} + +func (a *ipAccessListsPreviewImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { + var replaceResponse ReplaceResponse + path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) + return err +} + +func (a *ipAccessListsPreviewImpl) Update(ctx context.Context, request UpdateIpAccessList) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just NetworkConnectivityPreview API methods +type networkConnectivityPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *networkConnectivityPreviewImpl) CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) { + var networkConnectivityConfiguration NetworkConnectivityConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &networkConnectivityConfiguration) + return &networkConnectivityConfiguration, err +} + +func (a *networkConnectivityPreviewImpl) CreatePrivateEndpointRule(ctx context.Context, request CreatePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { + var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) + return &nccAzurePrivateEndpointRule, err +} + +func (a *networkConnectivityPreviewImpl) DeleteNetworkConnectivityConfiguration(ctx context.Context, request DeleteNetworkConnectivityConfigurationRequest) error { + var deleteNetworkConnectivityConfigurationResponse DeleteNetworkConnectivityConfigurationResponse + path := 
fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteNetworkConnectivityConfigurationResponse) + return err +} + +func (a *networkConnectivityPreviewImpl) DeletePrivateEndpointRule(ctx context.Context, request DeletePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { + var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) + return &nccAzurePrivateEndpointRule, err +} + +func (a *networkConnectivityPreviewImpl) GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) { + var networkConnectivityConfiguration NetworkConnectivityConfiguration + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &networkConnectivityConfiguration) + return &networkConnectivityConfiguration, err +} + +func (a *networkConnectivityPreviewImpl) GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { + var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) + return &nccAzurePrivateEndpointRule, err +} + +// List network connectivity configurations. +// +// Gets an array of network connectivity configurations. 
+func (a *networkConnectivityPreviewImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] { + + getNextPage := func(ctx context.Context, req ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListNetworkConnectivityConfigurations(ctx, req) + } + getItems := func(resp *ListNetworkConnectivityConfigurationsResponse) []NetworkConnectivityConfiguration { + return resp.Items + } + getNextReq := func(resp *ListNetworkConnectivityConfigurationsResponse) *ListNetworkConnectivityConfigurationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List network connectivity configurations. +// +// Gets an array of network connectivity configurations. +func (a *networkConnectivityPreviewImpl) ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) { + iterator := a.ListNetworkConnectivityConfigurations(ctx, request) + return listing.ToSlice[NetworkConnectivityConfiguration](ctx, iterator) +} +func (a *networkConnectivityPreviewImpl) internalListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { + var listNetworkConnectivityConfigurationsResponse ListNetworkConnectivityConfigurationsResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listNetworkConnectivityConfigurationsResponse) + return &listNetworkConnectivityConfigurationsResponse, err +} + +// List private endpoint rules. +// +// Gets an array of private endpoint rules. +func (a *networkConnectivityPreviewImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] { + + getNextPage := func(ctx context.Context, req ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPrivateEndpointRules(ctx, req) + } + getItems := func(resp *ListNccAzurePrivateEndpointRulesResponse) []NccAzurePrivateEndpointRule { + return resp.Items + } + getNextReq := func(resp *ListNccAzurePrivateEndpointRulesResponse) *ListPrivateEndpointRulesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List private endpoint rules. +// +// Gets an array of private endpoint rules. 
+func (a *networkConnectivityPreviewImpl) ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) { + iterator := a.ListPrivateEndpointRules(ctx, request) + return listing.ToSlice[NccAzurePrivateEndpointRule](ctx, iterator) +} +func (a *networkConnectivityPreviewImpl) internalListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { + var listNccAzurePrivateEndpointRulesResponse ListNccAzurePrivateEndpointRulesResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listNccAzurePrivateEndpointRulesResponse) + return &listNccAzurePrivateEndpointRulesResponse, err +} + +// unexported type that holds implementations of just NotificationDestinationsPreview API methods +type notificationDestinationsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *notificationDestinationsPreviewImpl) Create(ctx context.Context, request CreateNotificationDestinationRequest) (*NotificationDestination, error) { + var notificationDestination NotificationDestination + path := "/api/2.0preview/notification-destinations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &notificationDestination) + return &notificationDestination, err +} + +func (a *notificationDestinationsPreviewImpl) Delete(ctx context.Context, request DeleteNotificationDestinationRequest) error { + var empty Empty + path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *notificationDestinationsPreviewImpl) Get(ctx context.Context, request GetNotificationDestinationRequest) (*NotificationDestination, error) { + var notificationDestination NotificationDestination + path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &notificationDestination) + return &notificationDestination, err +} + +// List notification destinations. +// +// Lists notification destinations.
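+//
+// Illustrative pagination (a hedged sketch, not generated code; `w` is assumed
+// to expose NotificationDestinationsPreviewInterface):
+//
+//	it := w.List(ctx, ListNotificationDestinationsRequest{})
+//	for it.HasNext(ctx) {
+//		dest, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(dest.DisplayName)
+//	}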
+func (a *notificationDestinationsPreviewImpl) List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] { + + getNextPage := func(ctx context.Context, req ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListNotificationDestinationsResponse) []ListNotificationDestinationsResult { + return resp.Results + } + getNextReq := func(resp *ListNotificationDestinationsResponse) *ListNotificationDestinationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List notification destinations. +// +// Lists notification destinations. +func (a *notificationDestinationsPreviewImpl) ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListNotificationDestinationsResult](ctx, iterator) +} +func (a *notificationDestinationsPreviewImpl) internalList(ctx context.Context, request ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { + var listNotificationDestinationsResponse ListNotificationDestinationsResponse + path := "/api/2.0preview/notification-destinations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listNotificationDestinationsResponse) + return &listNotificationDestinationsResponse, err +} + +func (a *notificationDestinationsPreviewImpl) Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) { + var notificationDestination NotificationDestination + path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &notificationDestination) + return &notificationDestination, err +} + +// unexported type that holds implementations of just PersonalComputePreview API methods +type personalComputePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *personalComputePreviewImpl) Delete(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) { + var deletePersonalComputeSettingResponse DeletePersonalComputeSettingResponse + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deletePersonalComputeSettingResponse) + return &deletePersonalComputeSettingResponse, err +} + +func (a *personalComputePreviewImpl) Get(ctx context.Context, request GetPersonalComputeSettingRequest) (*PersonalComputeSetting, error) { + var personalComputeSetting PersonalComputeSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default",
a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &personalComputeSetting) + return &personalComputeSetting, err +} + +func (a *personalComputePreviewImpl) Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) { + var personalComputeSetting PersonalComputeSetting + path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &personalComputeSetting) + return &personalComputeSetting, err +} + +// unexported type that holds implementations of just RestrictWorkspaceAdminsPreview API methods +type restrictWorkspaceAdminsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *restrictWorkspaceAdminsPreviewImpl) Delete(ctx context.Context, request DeleteRestrictWorkspaceAdminsSettingRequest) (*DeleteRestrictWorkspaceAdminsSettingResponse, error) { + var deleteRestrictWorkspaceAdminsSettingResponse DeleteRestrictWorkspaceAdminsSettingResponse + path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteRestrictWorkspaceAdminsSettingResponse) + return &deleteRestrictWorkspaceAdminsSettingResponse, err +} + +func (a *restrictWorkspaceAdminsPreviewImpl) Get(ctx context.Context, request GetRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { + var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting + path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &restrictWorkspaceAdminsSetting) + return &restrictWorkspaceAdminsSetting, err +} + +func (a *restrictWorkspaceAdminsPreviewImpl) Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { + var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting + path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &restrictWorkspaceAdminsSetting) + return &restrictWorkspaceAdminsSetting, err +} + +// unexported type that holds implementations of just Settings API methods +type settingsImpl struct { + client *client.DatabricksClient +} + +// unexported type that holds implementations of just SettingsPreview API methods +type settingsPreviewImpl struct { + client *client.DatabricksClient +} + +// unexported type that holds implementations of just TokenManagementPreview API methods +type tokenManagementPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *tokenManagementPreviewImpl) 
CreateOboToken(ctx context.Context, request CreateOboTokenRequest) (*CreateOboTokenResponse, error) { + var createOboTokenResponse CreateOboTokenResponse + path := "/api/2.0preview/token-management/on-behalf-of/tokens" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createOboTokenResponse) + return &createOboTokenResponse, err +} + +func (a *tokenManagementPreviewImpl) Delete(ctx context.Context, request DeleteTokenManagementRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/token-management/tokens/%v", request.TokenId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *tokenManagementPreviewImpl) Get(ctx context.Context, request GetTokenManagementRequest) (*GetTokenResponse, error) { + var getTokenResponse GetTokenResponse + path := fmt.Sprintf("/api/2.0preview/token-management/tokens/%v", request.TokenId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getTokenResponse) + return &getTokenResponse, err +} + +func (a *tokenManagementPreviewImpl) GetPermissionLevels(ctx context.Context) (*GetTokenPermissionLevelsResponse, error) { + var getTokenPermissionLevelsResponse GetTokenPermissionLevelsResponse + path := "/api/2.0preview/permissions/authorization/tokens/permissionLevels" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getTokenPermissionLevelsResponse) + return &getTokenPermissionLevelsResponse, err +} + +func (a *tokenManagementPreviewImpl) GetPermissions(ctx context.Context) (*TokenPermissions, error) { + var tokenPermissions TokenPermissions + path := "/api/2.0preview/permissions/authorization/tokens" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &tokenPermissions) + return &tokenPermissions, err +} + +// List all tokens. +// +// Lists all tokens associated with the specified workspace or user. +func (a *tokenManagementPreviewImpl) List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] { + + getNextPage := func(ctx context.Context, req ListTokenManagementRequest) (*ListTokensResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListTokensResponse) []TokenInfo { + return resp.TokenInfos + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List all tokens. +// +// Lists all tokens associated with the specified workspace or user. 
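+//
+// Illustrative usage (a hedged sketch, not generated code; `w` is assumed to
+// be a client exposing TokenManagementPreviewInterface, e.g. built with
+// NewTokenManagementPreviewClient):
+//
+//	infos, err := w.ListAll(ctx, ListTokenManagementRequest{
+//		CreatedByUsername: "someone@example.com", // hypothetical filter value
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("found %d tokens\n", len(infos))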
+func (a *tokenManagementPreviewImpl) ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[TokenInfo](ctx, iterator) +} +func (a *tokenManagementPreviewImpl) internalList(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { + var listTokensResponse ListTokensResponse + path := "/api/2.0preview/token-management/tokens" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTokensResponse) + return &listTokensResponse, err +} + +func (a *tokenManagementPreviewImpl) SetPermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { + var tokenPermissions TokenPermissions + path := "/api/2.0preview/permissions/authorization/tokens" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &tokenPermissions) + return &tokenPermissions, err +} + +func (a *tokenManagementPreviewImpl) UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { + var tokenPermissions TokenPermissions + path := "/api/2.0preview/permissions/authorization/tokens" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &tokenPermissions) + return &tokenPermissions, err +} + +// unexported type that holds implementations of just TokensPreview API methods +type tokensPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *tokensPreviewImpl) Create(ctx context.Context, request CreateTokenRequest) (*CreateTokenResponse, error) { + var createTokenResponse CreateTokenResponse + path := "/api/2.0preview/token/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createTokenResponse) + return &createTokenResponse, err +} + +func (a *tokensPreviewImpl) Delete(ctx context.Context, request RevokeTokenRequest) error { + var revokeTokenResponse RevokeTokenResponse + path := "/api/2.0preview/token/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &revokeTokenResponse) + return err +} + +// List tokens. +// +// Lists all the valid tokens for a user-workspace pair. +func (a *tokensPreviewImpl) List(ctx context.Context) listing.Iterator[PublicTokenInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListPublicTokensResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListPublicTokensResponse) []PublicTokenInfo { + return resp.TokenInfos + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List tokens. 
+// +// Lists all the valid tokens for a user-workspace pair. +func (a *tokensPreviewImpl) ListAll(ctx context.Context) ([]PublicTokenInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[PublicTokenInfo](ctx, iterator) +} +func (a *tokensPreviewImpl) internalList(ctx context.Context) (*ListPublicTokensResponse, error) { + var listPublicTokensResponse ListPublicTokensResponse + path := "/api/2.0preview/token/list" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listPublicTokensResponse) + return &listPublicTokensResponse, err +} + +// unexported type that holds implementations of just WorkspaceConfPreview API methods +type workspaceConfPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *workspaceConfPreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*map[string]string, error) { + var workspaceConf map[string]string + path := "/api/2.0preview/workspace-conf" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceConf) + return &workspaceConf, err +} + +func (a *workspaceConfPreviewImpl) SetStatus(ctx context.Context, request WorkspaceConf) error { + var setStatusResponse SetStatusResponse + path := "/api/2.0preview/workspace-conf" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &setStatusResponse) + return err +} diff --git a/settings/v2preview/model.go b/settings/v2preview/model.go new file mode 100755 index 000000000..1e27828f1 --- /dev/null +++ b/settings/v2preview/model.go @@ -0,0 +1,2984 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package settingspreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type AccountIpAccessEnable struct { + AcctIpAclEnable BooleanMessage `json:"acct_ip_acl_enable"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. 
+ SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AccountIpAccessEnable) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AccountIpAccessEnable) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AibiDashboardEmbeddingAccessPolicy struct { + AccessPolicyType AibiDashboardEmbeddingAccessPolicyAccessPolicyType `json:"access_policy_type"` +} + +type AibiDashboardEmbeddingAccessPolicyAccessPolicyType string + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeAllowAllDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `ALLOW_ALL_DOMAINS` + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeAllowApprovedDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `ALLOW_APPROVED_DOMAINS` + +const AibiDashboardEmbeddingAccessPolicyAccessPolicyTypeDenyAllDomains AibiDashboardEmbeddingAccessPolicyAccessPolicyType = `DENY_ALL_DOMAINS` + +// String representation for [fmt.Print] +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) Set(v string) error { + switch v { + case `ALLOW_ALL_DOMAINS`, `ALLOW_APPROVED_DOMAINS`, `DENY_ALL_DOMAINS`: + *f = AibiDashboardEmbeddingAccessPolicyAccessPolicyType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW_ALL_DOMAINS", "ALLOW_APPROVED_DOMAINS", "DENY_ALL_DOMAINS"`, v) + } +} + +// Type always returns AibiDashboardEmbeddingAccessPolicyAccessPolicyType to satisfy [pflag.Value] interface +func (f *AibiDashboardEmbeddingAccessPolicyAccessPolicyType) Type() string { + return "AibiDashboardEmbeddingAccessPolicyAccessPolicyType" +} + +type AibiDashboardEmbeddingAccessPolicySetting struct { + AibiDashboardEmbeddingAccessPolicy AibiDashboardEmbeddingAccessPolicy `json:"aibi_dashboard_embedding_access_policy"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. 
+ SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AibiDashboardEmbeddingAccessPolicySetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AibiDashboardEmbeddingAccessPolicySetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AibiDashboardEmbeddingApprovedDomains struct { + ApprovedDomains []string `json:"approved_domains,omitempty"` +} + +type AibiDashboardEmbeddingApprovedDomainsSetting struct { + AibiDashboardEmbeddingApprovedDomains AibiDashboardEmbeddingApprovedDomains `json:"aibi_dashboard_embedding_approved_domains"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AibiDashboardEmbeddingApprovedDomainsSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AibiDashboardEmbeddingApprovedDomainsSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AutomaticClusterUpdateSetting struct { + AutomaticClusterUpdateWorkspace ClusterAutoRestartMessage `json:"automatic_cluster_update_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. 
+ SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AutomaticClusterUpdateSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AutomaticClusterUpdateSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type BooleanMessage struct { + Value bool `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BooleanMessage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BooleanMessage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterAutoRestartMessage struct { + CanToggle bool `json:"can_toggle,omitempty"` + + Enabled bool `json:"enabled,omitempty"` + // Contains information about the enablement status (e.g. whether + // the enterprise tier is enabled). This is only additional information that + // MUST NOT be used to decide whether the setting is enabled or not. It is + // intended to be used only for purposes like showing an error message to the + // customer with the additional details. For example, using these details we + // can check why exactly the feature is disabled for this customer. + EnablementDetails *ClusterAutoRestartMessageEnablementDetails `json:"enablement_details,omitempty"` + + MaintenanceWindow *ClusterAutoRestartMessageMaintenanceWindow `json:"maintenance_window,omitempty"` + + RestartEvenIfNoUpdatesAvailable bool `json:"restart_even_if_no_updates_available,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAutoRestartMessage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAutoRestartMessage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Contains information about the enablement status (e.g. whether the +// enterprise tier is enabled). This is only additional information that MUST NOT +// be used to decide whether the setting is enabled or not. It is intended to be +// used only for purposes like showing an error message to the customer with the +// additional details. For example, using these details we can check why exactly +// the feature is disabled for this customer.
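Since the maintenance-window message nests three levels deep, a minimal construction sketch may help; it uses only the types and constants defined just below, and the values are illustrative assumptions:

msg := ClusterAutoRestartMessage{
	Enabled: true,
	MaintenanceWindow: &ClusterAutoRestartMessageMaintenanceWindow{
		WeekDayBasedSchedule: &ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule{
			DayOfWeek: ClusterAutoRestartMessageMaintenanceWindowDayOfWeekSunday,
			Frequency: ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyEveryWeek,
			// 03:00 window start; Minutes defaults to zero.
			WindowStartTime: &ClusterAutoRestartMessageMaintenanceWindowWindowStartTime{Hours: 3},
		},
	},
}
_ = msg // would be wrapped in AutomaticClusterUpdateSetting for an update call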
+type ClusterAutoRestartMessageEnablementDetails struct { + // The feature is force enabled if compliance mode is active + ForcedForComplianceMode bool `json:"forced_for_compliance_mode,omitempty"` + // The feature is unavailable if the corresponding entitlement is disabled (see + // getShieldEntitlementEnable) + UnavailableForDisabledEntitlement bool `json:"unavailable_for_disabled_entitlement,omitempty"` + // The feature is unavailable if the customer doesn't have the enterprise tier + UnavailableForNonEnterpriseTier bool `json:"unavailable_for_non_enterprise_tier,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAutoRestartMessageEnablementDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAutoRestartMessageEnablementDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ClusterAutoRestartMessageMaintenanceWindow struct { + WeekDayBasedSchedule *ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `json:"week_day_based_schedule,omitempty"` +} + +type ClusterAutoRestartMessageMaintenanceWindowDayOfWeek string + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekFriday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `FRIDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekMonday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `MONDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekSaturday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `SATURDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekSunday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `SUNDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekThursday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `THURSDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekTuesday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `TUESDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekWednesday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `WEDNESDAY` + +// String representation for [fmt.Print] +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) Set(v string) error { + switch v { + case `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY`: + *f = ClusterAutoRestartMessageMaintenanceWindowDayOfWeek(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"`, v) + } +} + +// Type always returns ClusterAutoRestartMessageMaintenanceWindowDayOfWeek to satisfy [pflag.Value] interface +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) Type() string { + return "ClusterAutoRestartMessageMaintenanceWindowDayOfWeek" +} + +type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { + DayOfWeek ClusterAutoRestartMessageMaintenanceWindowDayOfWeek `json:"day_of_week,omitempty"` + + Frequency ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency `json:"frequency,omitempty"` + + WindowStartTime *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `json:"window_start_time,omitempty"` +} + +type ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency string + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyEveryWeek ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `EVERY_WEEK` + +const
ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFirstAndThirdOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FIRST_AND_THIRD_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFirstOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FIRST_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFourthOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FOURTH_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencySecondAndFourthOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `SECOND_AND_FOURTH_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencySecondOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `SECOND_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyThirdOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `THIRD_OF_MONTH` + +// String representation for [fmt.Print] +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) Set(v string) error { + switch v { + case `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH`: + *f = ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EVERY_WEEK", "FIRST_AND_THIRD_OF_MONTH", "FIRST_OF_MONTH", "FOURTH_OF_MONTH", "SECOND_AND_FOURTH_OF_MONTH", "SECOND_OF_MONTH", "THIRD_OF_MONTH"`, v) + } +} + +// Type always returns ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency to satisfy [pflag.Value] interface +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) Type() string { + return "ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency" +} + +type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { + Hours int `json:"hours,omitempty"` + + Minutes int `json:"minutes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// SHIELD feature: CSP +type ComplianceSecurityProfile struct { + // Set by customers when they request Compliance Security Profile (CSP) + ComplianceStandards []ComplianceStandard `json:"compliance_standards,omitempty"` + + IsEnabled bool `json:"is_enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ComplianceSecurityProfile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ComplianceSecurityProfile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ComplianceSecurityProfileSetting struct { + // SHIELD feature: CSP + ComplianceSecurityProfileWorkspace ComplianceSecurityProfile `json:"compliance_security_profile_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ComplianceSecurityProfileSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ComplianceSecurityProfileSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Compliance standard for SHIELD customers +type ComplianceStandard string + +const ComplianceStandardCanadaProtectedB ComplianceStandard = `CANADA_PROTECTED_B` + +const ComplianceStandardCyberEssentialPlus ComplianceStandard = `CYBER_ESSENTIAL_PLUS` + +const ComplianceStandardFedrampHigh ComplianceStandard = `FEDRAMP_HIGH` + +const ComplianceStandardFedrampIl5 ComplianceStandard = `FEDRAMP_IL5` + +const ComplianceStandardFedrampModerate ComplianceStandard = `FEDRAMP_MODERATE` + +const ComplianceStandardHipaa ComplianceStandard = `HIPAA` + +const ComplianceStandardHitrust ComplianceStandard = `HITRUST` + +const ComplianceStandardIrapProtected ComplianceStandard = `IRAP_PROTECTED` + +const ComplianceStandardIsmap ComplianceStandard = `ISMAP` + +const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR` + +const ComplianceStandardNone ComplianceStandard = `NONE` + +const ComplianceStandardPciDss ComplianceStandard = `PCI_DSS` + +// String representation for [fmt.Print] +func (f *ComplianceStandard) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ComplianceStandard) Set(v string) error { + switch v { + case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + *f = ComplianceStandard(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + } +} + +// Type always returns ComplianceStandard to satisfy [pflag.Value] interface +func (f *ComplianceStandard) Type() string { + return "ComplianceStandard" +} + +type Config struct { + Email *EmailConfig `json:"email,omitempty"` + + GenericWebhook *GenericWebhookConfig `json:"generic_webhook,omitempty"` + + MicrosoftTeams *MicrosoftTeamsConfig `json:"microsoft_teams,omitempty"` + + Pagerduty *PagerdutyConfig `json:"pagerduty,omitempty"` + + Slack *SlackConfig `json:"slack,omitempty"` +} + +// Details required to configure a block list or allow list. +type CreateIpAccessList struct { + IpAddresses []string `json:"ip_addresses,omitempty"` + // Label for the IP access list. This **cannot** be empty. + Label string `json:"label"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list.
Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `json:"list_type"` +} + +// An IP access list was successfully created. +type CreateIpAccessListResponse struct { + // Definition of an IP Access list + IpAccessList *IpAccessListInfo `json:"ip_access_list,omitempty"` +} + +type CreateNetworkConnectivityConfigRequest struct { + // The name of the network connectivity configuration. The name can contain + // alphanumeric characters, hyphens, and underscores. The length must be + // between 3 and 30 characters. The name must match the regular expression + // `^[0-9a-zA-Z-_]{3,30}$`. + Name string `json:"name"` + // The region for the network connectivity configuration. Only workspaces in + // the same region can be attached to the network connectivity + // configuration. + Region string `json:"region"` +} + +type CreateNotificationDestinationRequest struct { + // The configuration for the notification destination. Must wrap EXACTLY one + // of the nested configs. + Config *Config `json:"config,omitempty"` + // The display name for the notification destination. + DisplayName string `json:"display_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateNotificationDestinationRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateNotificationDestinationRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Configuration details for creating on-behalf tokens. +type CreateOboTokenRequest struct { + // Application ID of the service principal. + ApplicationId string `json:"application_id"` + // Comment that describes the purpose of the token. + Comment string `json:"comment,omitempty"` + // The number of seconds before the token expires. + LifetimeSeconds int64 `json:"lifetime_seconds,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateOboTokenRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateOboTokenRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// An on-behalf token was successfully created for the service principal. +type CreateOboTokenResponse struct { + TokenInfo *TokenInfo `json:"token_info,omitempty"` + // Value of the token. + TokenValue string `json:"token_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateOboTokenResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateOboTokenResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreatePrivateEndpointRuleRequest struct { + // The sub-resource type (group ID) of the target resource. Note that to + // connect to workspace root storage (root DBFS), you need two endpoints, + // one for `blob` and one for `dfs`. + GroupId CreatePrivateEndpointRuleRequestGroupId `json:"group_id"` + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // The Azure resource ID of the target resource. + ResourceId string `json:"resource_id"` +} + +// The sub-resource type (group ID) of the target resource. Note that to connect +// to workspace root storage (root DBFS), you need two endpoints, one for `blob` +// and one for `dfs`.
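The root-DBFS note above is easy to miss: reaching workspace root storage takes one rule per sub-resource type. A hedged sketch, where nccId and storageAccountId are assumed inputs and the group-ID constants come from just below:

nccId, storageAccountId := "ncc-id", "/subscriptions/.../storageAccounts/root" // assumptions
for _, groupId := range []CreatePrivateEndpointRuleRequestGroupId{
	CreatePrivateEndpointRuleRequestGroupIdBlob,
	CreatePrivateEndpointRuleRequestGroupIdDfs,
} {
	rule := CreatePrivateEndpointRuleRequest{
		NetworkConnectivityConfigId: nccId,
		ResourceId:                  storageAccountId,
		GroupId:                     groupId,
	}
	_ = rule // each rule would be passed to the preview client's create call
}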
+type CreatePrivateEndpointRuleRequestGroupId string + +const CreatePrivateEndpointRuleRequestGroupIdBlob CreatePrivateEndpointRuleRequestGroupId = `blob` + +const CreatePrivateEndpointRuleRequestGroupIdDfs CreatePrivateEndpointRuleRequestGroupId = `dfs` + +const CreatePrivateEndpointRuleRequestGroupIdMysqlServer CreatePrivateEndpointRuleRequestGroupId = `mysqlServer` + +const CreatePrivateEndpointRuleRequestGroupIdSqlServer CreatePrivateEndpointRuleRequestGroupId = `sqlServer` + +// String representation for [fmt.Print] +func (f *CreatePrivateEndpointRuleRequestGroupId) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreatePrivateEndpointRuleRequestGroupId) Set(v string) error { + switch v { + case `blob`, `dfs`, `mysqlServer`, `sqlServer`: + *f = CreatePrivateEndpointRuleRequestGroupId(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v) + } +} + +// Type always returns CreatePrivateEndpointRuleRequestGroupId to satisfy [pflag.Value] interface +func (f *CreatePrivateEndpointRuleRequestGroupId) Type() string { + return "CreatePrivateEndpointRuleRequestGroupId" +} + +type CreateTokenRequest struct { + // Optional description to attach to the token. + Comment string `json:"comment,omitempty"` + // The lifetime of the token, in seconds. + // + // If the lifetime is not specified, this token remains valid indefinitely. + LifetimeSeconds int64 `json:"lifetime_seconds,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateTokenRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateTokenRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateTokenResponse struct { + // The information for the new token. + TokenInfo *PublicTokenInfo `json:"token_info,omitempty"` + // The value of the new token. + TokenValue string `json:"token_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateTokenResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateTokenResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Account level policy for CSP +type CspEnablementAccount struct { + // Set by customers when they request Compliance Security Profile (CSP) + // Invariants are enforced in Settings policy. + ComplianceStandards []ComplianceStandard `json:"compliance_standards,omitempty"` + // Enforced = it cannot be overridden at workspace level. + IsEnforced bool `json:"is_enforced,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CspEnablementAccount) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CspEnablementAccount) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CspEnablementAccountSetting struct { + // Account level policy for CSP + CspEnablementAccount CspEnablementAccount `json:"csp_enablement_account"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating.
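// A hedged sketch of that read -> update handshake, with get and patch standing
// in for the generated Get/Update API methods (assumptions, not shown in this hunk):
//
//	cur, _ := get(ctx)                         // *CspEnablementAccountSetting, fresh etag
//	cur.CspEnablementAccount.IsEnforced = true // mutate the payload
//	_, err := patch(ctx, cur)                  // stale cur.Etag => conflict, not overwrite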
+ Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CspEnablementAccountSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CspEnablementAccountSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// This represents the setting configuration for the default namespace in the +// Databricks workspace. Setting the default catalog for the workspace +// determines the catalog that is used when queries do not reference a fully +// qualified three-level name. For example, if the default catalog is set to +// 'retail_prod' then a query 'SELECT * FROM myTable' would reference the object +// 'retail_prod.default.myTable' (the schema 'default' is always assumed). This +// setting requires a restart of clusters and SQL warehouses to take effect. +// Additionally, the default namespace only applies when using Unity +// Catalog-enabled compute. +type DefaultNamespaceSetting struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + + Namespace StringMessage `json:"namespace"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DefaultNamespaceSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DefaultNamespaceSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete the account IP access toggle setting +type DeleteAccountIpAccessEnableRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting.
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAccountIpAccessEnableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteAccountIpAccessEnableResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete access list +type DeleteAccountIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId string `json:"-" url:"-"` +} + +// Delete the AI/BI dashboard embedding access policy +type DeleteAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteAibiDashboardEmbeddingAccessPolicySettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete AI/BI dashboard embedding approved domains +type DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete the default namespace setting +type DeleteDefaultNamespaceSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDefaultNamespaceSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDefaultNamespaceSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteDefaultNamespaceSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete Legacy Access Disablement Status +type DeleteDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDisableLegacyAccessRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteDisableLegacyAccessResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete the disable legacy DBFS setting +type DeleteDisableLegacyDbfsRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDisableLegacyDbfsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDisableLegacyDbfsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteDisableLegacyDbfsResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete the disable legacy features setting +type DeleteDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDisableLegacyFeaturesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. 
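Every Delete*Request/Delete*Response pair in this file shares that read -> delete handshake; here is a minimal sketch for the default-namespace setting, assuming a context import and that get and del stand in for the generated Get/Delete methods:

func deleteDefaultNamespace(
	ctx context.Context,
	get func(context.Context, GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error),
	del func(context.Context, DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error),
) error {
	cur, err := get(ctx, GetDefaultNamespaceSettingRequest{})
	if err != nil {
		return err
	}
	// Echo the freshest etag so a concurrent update fails this delete
	// instead of being silently discarded.
	_, err = del(ctx, DeleteDefaultNamespaceSettingRequest{Etag: cur.Etag})
	return err
}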
+type DeleteDisableLegacyFeaturesResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete access list +type DeleteIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId string `json:"-" url:"-"` +} + +// Delete a network connectivity configuration +type DeleteNetworkConnectivityConfigurationRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` +} + +type DeleteNetworkConnectivityConfigurationResponse struct { +} + +// Delete a notification destination +type DeleteNotificationDestinationRequest struct { + Id string `json:"-" url:"-"` +} + +// Delete Personal Compute setting +type DeletePersonalComputeSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeletePersonalComputeSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeletePersonalComputeSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeletePersonalComputeSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete a private endpoint rule +type DeletePrivateEndpointRuleRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // Your private endpoint rule ID. + PrivateEndpointRuleId string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete the restrict workspace admins setting +type DeleteRestrictWorkspaceAdminsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other.
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteRestrictWorkspaceAdminsSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteRestrictWorkspaceAdminsSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteRestrictWorkspaceAdminsSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"etag"` +} + +// Delete a token +type DeleteTokenManagementRequest struct { + // The ID of the token to revoke. + TokenId string `json:"-" url:"-"` +} + +type DestinationType string + +const DestinationTypeEmail DestinationType = `EMAIL` + +const DestinationTypeMicrosoftTeams DestinationType = `MICROSOFT_TEAMS` + +const DestinationTypePagerduty DestinationType = `PAGERDUTY` + +const DestinationTypeSlack DestinationType = `SLACK` + +const DestinationTypeWebhook DestinationType = `WEBHOOK` + +// String representation for [fmt.Print] +func (f *DestinationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DestinationType) Set(v string) error { + switch v { + case `EMAIL`, `MICROSOFT_TEAMS`, `PAGERDUTY`, `SLACK`, `WEBHOOK`: + *f = DestinationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMAIL", "MICROSOFT_TEAMS", "PAGERDUTY", "SLACK", "WEBHOOK"`, v) + } +} + +// Type always returns DestinationType to satisfy [pflag.Value] interface +func (f *DestinationType) Type() string { + return "DestinationType" +} + +type DisableLegacyAccess struct { + DisableLegacyAccess BooleanMessage `json:"disable_legacy_access"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. 
+ SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DisableLegacyAccess) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DisableLegacyAccess) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DisableLegacyDbfs struct { + DisableLegacyDbfs BooleanMessage `json:"disable_legacy_dbfs"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DisableLegacyDbfs) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DisableLegacyDbfs) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DisableLegacyFeatures struct { + DisableLegacyFeatures BooleanMessage `json:"disable_legacy_features"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DisableLegacyFeatures) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DisableLegacyFeatures) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EmailConfig struct { + // Email addresses to notify. 
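// For example, a destination whose Config wraps exactly one variant, as
// CreateNotificationDestinationRequest requires (a hedged sketch; the display
// name and address are illustrative):
//
//	req := CreateNotificationDestinationRequest{
//		DisplayName: "on-call email",
//		Config:      &Config{Email: &EmailConfig{Addresses: []string{"oncall@example.com"}}},
//	}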
+ Addresses []string `json:"addresses,omitempty"` +} + +type Empty struct { +} + +// SHIELD feature: ESM +type EnhancedSecurityMonitoring struct { + IsEnabled bool `json:"is_enabled,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnhancedSecurityMonitoring) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnhancedSecurityMonitoring) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnhancedSecurityMonitoringSetting struct { + // SHIELD feature: ESM + EnhancedSecurityMonitoringWorkspace EnhancedSecurityMonitoring `json:"enhanced_security_monitoring_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnhancedSecurityMonitoringSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnhancedSecurityMonitoringSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Account level policy for ESM +type EsmEnablementAccount struct { + IsEnforced bool `json:"is_enforced,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EsmEnablementAccount) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EsmEnablementAccount) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EsmEnablementAccountSetting struct { + // Account level policy for ESM + EsmEnablementAccount EsmEnablementAccount `json:"esm_enablement_account"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. 
+ SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EsmEnablementAccountSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EsmEnablementAccountSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The exchange token is the result of the token exchange with the IdP +type ExchangeToken struct { + // The requested token. + Credential string `json:"credential,omitempty"` + // The end-of-life timestamp of the token. The value is in milliseconds + // since the Unix epoch. + CredentialEolTime int64 `json:"credentialEolTime,omitempty"` + // User ID of the user that owns this token. + OwnerId int64 `json:"ownerId,omitempty"` + // The scopes of access granted in the token. + Scopes []string `json:"scopes,omitempty"` + // The type of this exchange token + TokenType TokenType `json:"tokenType,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExchangeToken) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExchangeToken) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Exchange a token with the IdP +type ExchangeTokenRequest struct { + // The partition of the Credentials store + PartitionId PartitionId `json:"partitionId"` + // Array of scopes for the token request. + Scopes []string `json:"scopes"` + // A list of token types being requested + TokenType []TokenType `json:"tokenType"` +} + +// Exchanged tokens were successfully returned. +type ExchangeTokenResponse struct { + Values []ExchangeToken `json:"values,omitempty"` +} + +// An IP access list was successfully returned. +type FetchIpAccessListResponse struct { + // Definition of an IP Access list + IpAccessList *IpAccessListInfo `json:"ip_access_list,omitempty"` +} + +type GenericWebhookConfig struct { + // [Input-Only][Optional] Password for webhook. + Password string `json:"password,omitempty"` + // [Output-Only] Whether password is set. + PasswordSet bool `json:"password_set,omitempty"` + // [Input-Only] URL for webhook. + Url string `json:"url,omitempty"` + // [Output-Only] Whether URL is set. + UrlSet bool `json:"url_set,omitempty"` + // [Input-Only][Optional] Username for webhook. + Username string `json:"username,omitempty"` + // [Output-Only] Whether username is set. + UsernameSet bool `json:"username_set,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GenericWebhookConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenericWebhookConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the account IP access toggle setting +type GetAccountIpAccessEnableRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting.
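// Note the tags on the field below: `json:"-" url:"etag,omitempty"` means the
// etag travels as a query parameter rather than in a JSON body, so a hedged
// call reads like
//
//	_, err := get(ctx, GetAccountIpAccessEnableRequest{Etag: prevEtag}) // GET ...?etag=<prevEtag>
//
// where get stands in for the generated API method (an assumption).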
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAccountIpAccessEnableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get IP access list +type GetAccountIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId string `json:"-" url:"-"` +} + +// Retrieve the AI/BI dashboard embedding access policy +type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAibiDashboardEmbeddingAccessPolicySettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAibiDashboardEmbeddingAccessPolicySettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Retrieve the list of domains approved to host embedded AI/BI dashboards +type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the automatic cluster update setting +type GetAutomaticClusterUpdateSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAutomaticClusterUpdateSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAutomaticClusterUpdateSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the compliance security profile setting +type GetComplianceSecurityProfileSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetComplianceSecurityProfileSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetComplianceSecurityProfileSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the compliance security profile setting for new workspaces +type GetCspEnablementAccountSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetCspEnablementAccountSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetCspEnablementAccountSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the default namespace setting +type GetDefaultNamespaceSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDefaultNamespaceSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDefaultNamespaceSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Retrieve Legacy Access Disablement Status +type GetDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDisableLegacyAccessRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the disable legacy DBFS setting +type GetDisableLegacyDbfsRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDisableLegacyDbfsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDisableLegacyDbfsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the disable legacy features setting +type GetDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetDisableLegacyFeaturesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get the enhanced security monitoring setting +type GetEnhancedSecurityMonitoringSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"-" url:"etag,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetEnhancedSecurityMonitoringSettingRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetEnhancedSecurityMonitoringSettingRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get the enhanced security monitoring setting for new workspaces
+type GetEsmEnablementAccountSettingRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided. This is used for optimistic concurrency control as a way to
+ // help prevent simultaneous writes of a setting overwriting each other. It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // delete pattern to perform setting deletions in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // DELETE request to identify the rule set version you are deleting.
+ Etag string `json:"-" url:"etag,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetEsmEnablementAccountSettingRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetEsmEnablementAccountSettingRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get access list
+type GetIpAccessListRequest struct {
+ // The ID for the corresponding IP access list
+ IpAccessListId string `json:"-" url:"-"`
+}
+
+type GetIpAccessListResponse struct {
+ // Definition of an IP Access list
+ IpAccessList *IpAccessListInfo `json:"ip_access_list,omitempty"`
+}
+
+// IP access lists were successfully returned.
+type GetIpAccessListsResponse struct {
+ IpAccessLists []IpAccessListInfo `json:"ip_access_lists,omitempty"`
+}
+
+// Get a network connectivity configuration
+type GetNetworkConnectivityConfigurationRequest struct {
+ // Your Network Connectivity Configuration ID.
+ NetworkConnectivityConfigId string `json:"-" url:"-"`
+}
+
+// Get a notification destination
+type GetNotificationDestinationRequest struct {
+ Id string `json:"-" url:"-"`
+}
+
+// Get Personal Compute setting
+type GetPersonalComputeSettingRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided. This is used for optimistic concurrency control as a way to
+ // help prevent simultaneous writes of a setting overwriting each other. It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // delete pattern to perform setting deletions in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // DELETE request to identify the rule set version you are deleting.
+ Etag string `json:"-" url:"etag,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetPersonalComputeSettingRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetPersonalComputeSettingRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Get a private endpoint rule
+type GetPrivateEndpointRuleRequest struct {
+ // Your Network Connectivity Configuration ID.
+ NetworkConnectivityConfigId string `json:"-" url:"-"`
+ // Your private endpoint rule ID.
+ PrivateEndpointRuleId string `json:"-" url:"-"`
+}
+
+// Get the restrict workspace admins setting
+type GetRestrictWorkspaceAdminsSettingRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided.
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetRestrictWorkspaceAdminsSettingRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetRestrictWorkspaceAdminsSettingRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Check configuration status +type GetStatusRequest struct { + Keys string `json:"-" url:"keys"` +} + +// Get token info +type GetTokenManagementRequest struct { + // The ID of the token to get. + TokenId string `json:"-" url:"-"` +} + +type GetTokenPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []TokenPermissionsDescription `json:"permission_levels,omitempty"` +} + +// Token with specified Token ID was successfully returned. +type GetTokenResponse struct { + TokenInfo *TokenInfo `json:"token_info,omitempty"` +} + +// Definition of an IP Access list +type IpAccessListInfo struct { + // Total number of IP or CIDR values. + AddressCount int `json:"address_count,omitempty"` + // Creation timestamp in milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // User ID of the user who created this list. + CreatedBy int64 `json:"created_by,omitempty"` + // Specifies whether this IP access list is enabled. + Enabled bool `json:"enabled,omitempty"` + + IpAddresses []string `json:"ip_addresses,omitempty"` + // Label for the IP access list. This **cannot** be empty. + Label string `json:"label,omitempty"` + // Universally unique identifier (UUID) of the IP access list. + ListId string `json:"list_id,omitempty"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `json:"list_type,omitempty"` + // Update timestamp in milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // User ID of the user who updated this list. + UpdatedBy int64 `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *IpAccessListInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s IpAccessListInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// IP access lists were successfully returned. +type ListIpAccessListResponse struct { + IpAccessLists []IpAccessListInfo `json:"ip_access_lists,omitempty"` +} + +type ListNccAzurePrivateEndpointRulesResponse struct { + Items []NccAzurePrivateEndpointRule `json:"items,omitempty"` + // A token that can be used to get the next page of results. If null, there + // are no more results to show. 
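+ //
+ // A sketch of the usual pagination loop over this token; the `list`
+ // function standing in for the underlying API call and the `nccID` value
+ // are illustrative, not part of this file:
+ //
+ //	req := ListPrivateEndpointRulesRequest{NetworkConnectivityConfigId: nccID}
+ //	var rules []NccAzurePrivateEndpointRule
+ //	for {
+ //		page, err := list(ctx, req)
+ //		if err != nil {
+ //			return err
+ //		}
+ //		rules = append(rules, page.Items...)
+ //		if page.NextPageToken == "" {
+ //			break
+ //		}
+ //		req.PageToken = page.NextPageToken
+ //	}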
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNccAzurePrivateEndpointRulesResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNccAzurePrivateEndpointRulesResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List network connectivity configurations
+type ListNetworkConnectivityConfigurationsRequest struct {
+ // Pagination token to go to the next page based on the previous query.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNetworkConnectivityConfigurationsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNetworkConnectivityConfigurationsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListNetworkConnectivityConfigurationsResponse struct {
+ Items []NetworkConnectivityConfiguration `json:"items,omitempty"`
+ // A token that can be used to get the next page of results. If null, there
+ // are no more results to show.
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNetworkConnectivityConfigurationsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNetworkConnectivityConfigurationsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List notification destinations
+type ListNotificationDestinationsRequest struct {
+ PageSize int64 `json:"-" url:"page_size,omitempty"`
+
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNotificationDestinationsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNotificationDestinationsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListNotificationDestinationsResponse struct {
+ // Page token for the next page of results.
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ Results []ListNotificationDestinationsResult `json:"results,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNotificationDestinationsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNotificationDestinationsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListNotificationDestinationsResult struct {
+ // [Output-only] The type of the notification destination. The type cannot
+ // be changed once set.
+ DestinationType DestinationType `json:"destination_type,omitempty"`
+ // The display name for the notification destination.
+ DisplayName string `json:"display_name,omitempty"`
+ // UUID identifying notification destination.
+ Id string `json:"id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListNotificationDestinationsResult) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListNotificationDestinationsResult) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List private endpoint rules
+type ListPrivateEndpointRulesRequest struct {
+ // Your Network Connectivity Configuration ID.
+ NetworkConnectivityConfigId string `json:"-" url:"-"`
+ // Pagination token to go to the next page based on the previous query.
+ PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListPrivateEndpointRulesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListPrivateEndpointRulesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListPublicTokensResponse struct { + // The information for each token. + TokenInfos []PublicTokenInfo `json:"token_infos,omitempty"` +} + +// List all tokens +type ListTokenManagementRequest struct { + // User ID of the user that created the token. + CreatedById int64 `json:"-" url:"created_by_id,omitempty"` + // Username of the user that created the token. + CreatedByUsername string `json:"-" url:"created_by_username,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListTokenManagementRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListTokenManagementRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Tokens were successfully returned. +type ListTokensResponse struct { + // Token metadata of each user-created token in the workspace + TokenInfos []TokenInfo `json:"token_infos,omitempty"` +} + +// Type of IP access list. Valid values are as follows and are case-sensitive: +// +// * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block list. +// Exclude this IP or range. IP addresses in the block list are excluded even if +// they are included in an allow list. +type ListType string + +// An allow list. Include this IP or range. +const ListTypeAllow ListType = `ALLOW` + +// A block list. Exclude this IP or range. IP addresses in the block list are +// excluded even if they are included in an allow list. +const ListTypeBlock ListType = `BLOCK` + +// String representation for [fmt.Print] +func (f *ListType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListType) Set(v string) error { + switch v { + case `ALLOW`, `BLOCK`: + *f = ListType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW", "BLOCK"`, v) + } +} + +// Type always returns ListType to satisfy [pflag.Value] interface +func (f *ListType) Type() string { + return "ListType" +} + +type MicrosoftTeamsConfig struct { + // [Input-Only] URL for Microsoft Teams. + Url string `json:"url,omitempty"` + // [Output-Only] Whether URL is set. + UrlSet bool `json:"url_set,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MicrosoftTeamsConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MicrosoftTeamsConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The stable AWS IP CIDR blocks. You can use these to configure the firewall of +// your resources to allow traffic from your Databricks workspace. +type NccAwsStableIpRule struct { + // The list of stable IP CIDR blocks from which Databricks network traffic + // originates when accessing your resources. + CidrBlocks []string `json:"cidr_blocks,omitempty"` +} + +type NccAzurePrivateEndpointRule struct { + // The current status of this private endpoint. The private endpoint rules + // are effective only if the connection state is `ESTABLISHED`. Remember + // that you must approve new endpoints on your resources in the Azure portal + // before they take effect. + // + // The possible values are: - INIT: (deprecated) The endpoint has been + // created and pending approval. 
- PENDING: The endpoint has been created + // and pending approval. - ESTABLISHED: The endpoint has been approved and + // is ready to use in your serverless compute resources. - REJECTED: + // Connection was rejected by the private link resource owner. - + // DISCONNECTED: Connection was removed by the private link resource owner, + // the private endpoint becomes informative and should be deleted for + // clean-up. + ConnectionState NccAzurePrivateEndpointRuleConnectionState `json:"connection_state,omitempty"` + // Time in epoch milliseconds when this object was created. + CreationTime int64 `json:"creation_time,omitempty"` + // Whether this private endpoint is deactivated. + Deactivated bool `json:"deactivated,omitempty"` + // Time in epoch milliseconds when this object was deactivated. + DeactivatedAt int64 `json:"deactivated_at,omitempty"` + // The name of the Azure private endpoint resource. + EndpointName string `json:"endpoint_name,omitempty"` + // The sub-resource type (group ID) of the target resource. Note that to + // connect to workspace root storage (root DBFS), you need two endpoints, + // one for `blob` and one for `dfs`. + GroupId NccAzurePrivateEndpointRuleGroupId `json:"group_id,omitempty"` + // The ID of a network connectivity configuration, which is the parent + // resource of this private endpoint rule object. + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + // The Azure resource ID of the target resource. + ResourceId string `json:"resource_id,omitempty"` + // The ID of a private endpoint rule. + RuleId string `json:"rule_id,omitempty"` + // Time in epoch milliseconds when this object was updated. + UpdatedTime int64 `json:"updated_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NccAzurePrivateEndpointRule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NccAzurePrivateEndpointRule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The current status of this private endpoint. The private endpoint rules are +// effective only if the connection state is `ESTABLISHED`. Remember that you +// must approve new endpoints on your resources in the Azure portal before they +// take effect. +// +// The possible values are: - INIT: (deprecated) The endpoint has been created +// and pending approval. - PENDING: The endpoint has been created and pending +// approval. - ESTABLISHED: The endpoint has been approved and is ready to use +// in your serverless compute resources. - REJECTED: Connection was rejected by +// the private link resource owner. - DISCONNECTED: Connection was removed by +// the private link resource owner, the private endpoint becomes informative and +// should be deleted for clean-up. 
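+//
+// For example, a caller might flag rules that are not yet usable and still
+// need approval in the Azure portal (the `rules` slice here is assumed to
+// come from a prior list call and is illustrative):
+//
+//	for _, r := range rules {
+//		if r.ConnectionState != NccAzurePrivateEndpointRuleConnectionStateEstablished {
+//			log.Printf("endpoint %s: state %s, not usable yet", r.EndpointName, r.ConnectionState)
+//		}
+//	}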
+type NccAzurePrivateEndpointRuleConnectionState string
+
+const NccAzurePrivateEndpointRuleConnectionStateDisconnected NccAzurePrivateEndpointRuleConnectionState = `DISCONNECTED`
+
+const NccAzurePrivateEndpointRuleConnectionStateEstablished NccAzurePrivateEndpointRuleConnectionState = `ESTABLISHED`
+
+const NccAzurePrivateEndpointRuleConnectionStateInit NccAzurePrivateEndpointRuleConnectionState = `INIT`
+
+const NccAzurePrivateEndpointRuleConnectionStatePending NccAzurePrivateEndpointRuleConnectionState = `PENDING`
+
+const NccAzurePrivateEndpointRuleConnectionStateRejected NccAzurePrivateEndpointRuleConnectionState = `REJECTED`
+
+// String representation for [fmt.Print]
+func (f *NccAzurePrivateEndpointRuleConnectionState) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error {
+ switch v {
+ case `DISCONNECTED`, `ESTABLISHED`, `INIT`, `PENDING`, `REJECTED`:
+ *f = NccAzurePrivateEndpointRuleConnectionState(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "INIT", "PENDING", "REJECTED"`, v)
+ }
+}
+
+// Type always returns NccAzurePrivateEndpointRuleConnectionState to satisfy [pflag.Value] interface
+func (f *NccAzurePrivateEndpointRuleConnectionState) Type() string {
+ return "NccAzurePrivateEndpointRuleConnectionState"
+}
+
+// The sub-resource type (group ID) of the target resource. Note that to connect
+// to workspace root storage (root DBFS), you need two endpoints, one for `blob`
+// and one for `dfs`.
+type NccAzurePrivateEndpointRuleGroupId string
+
+const NccAzurePrivateEndpointRuleGroupIdBlob NccAzurePrivateEndpointRuleGroupId = `blob`
+
+const NccAzurePrivateEndpointRuleGroupIdDfs NccAzurePrivateEndpointRuleGroupId = `dfs`
+
+const NccAzurePrivateEndpointRuleGroupIdMysqlServer NccAzurePrivateEndpointRuleGroupId = `mysqlServer`
+
+const NccAzurePrivateEndpointRuleGroupIdSqlServer NccAzurePrivateEndpointRuleGroupId = `sqlServer`
+
+// String representation for [fmt.Print]
+func (f *NccAzurePrivateEndpointRuleGroupId) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *NccAzurePrivateEndpointRuleGroupId) Set(v string) error {
+ switch v {
+ case `blob`, `dfs`, `mysqlServer`, `sqlServer`:
+ *f = NccAzurePrivateEndpointRuleGroupId(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v)
+ }
+}
+
+// Type always returns NccAzurePrivateEndpointRuleGroupId to satisfy [pflag.Value] interface
+func (f *NccAzurePrivateEndpointRuleGroupId) Type() string {
+ return "NccAzurePrivateEndpointRuleGroupId"
+}
+
+// The stable Azure service endpoints. You can configure the firewall of your
+// Azure resources to allow traffic from your Databricks serverless compute
+// resources.
+type NccAzureServiceEndpointRule struct {
+ // The list of subnets from which Databricks network traffic originates when
+ // accessing your Azure resources.
+ Subnets []string `json:"subnets,omitempty"`
+ // The Azure region in which this service endpoint rule applies.
+ TargetRegion string `json:"target_region,omitempty"`
+ // The Azure services to which this service endpoint rule applies.
+ TargetServices []string `json:"target_services,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NccAzureServiceEndpointRule) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s NccAzureServiceEndpointRule) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// The network connectivity rules that apply to network traffic from your
+// serverless compute resources.
+type NccEgressConfig struct {
+ // The network connectivity rules that are applied by default without
+ // resource-specific configurations. You can find the stable network
+ // information of your serverless compute resources here.
+ DefaultRules *NccEgressDefaultRules `json:"default_rules,omitempty"`
+ // The network connectivity rules that are configured for each destination.
+ // These rules override the default rules.
+ TargetRules *NccEgressTargetRules `json:"target_rules,omitempty"`
+}
+
+// The network connectivity rules that are applied by default without
+// resource-specific configurations. You can find the stable network
+// information of your serverless compute resources here.
+type NccEgressDefaultRules struct {
+ // The stable AWS IP CIDR blocks. You can use these to configure the
+ // firewall of your resources to allow traffic from your Databricks
+ // workspace.
+ AwsStableIpRule *NccAwsStableIpRule `json:"aws_stable_ip_rule,omitempty"`
+ // The stable Azure service endpoints. You can configure the firewall of
+ // your Azure resources to allow traffic from your Databricks serverless
+ // compute resources.
+ AzureServiceEndpointRule *NccAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"`
+}
+
+// The network connectivity rules that are configured for each destination.
+// These rules override the default rules.
+type NccEgressTargetRules struct {
+ AzurePrivateEndpointRules []NccAzurePrivateEndpointRule `json:"azure_private_endpoint_rules,omitempty"`
+}
+
+type NetworkConnectivityConfiguration struct {
+ // The Databricks account ID that hosts the credential.
+ AccountId string `json:"account_id,omitempty"`
+ // Time in epoch milliseconds when this object was created.
+ CreationTime int64 `json:"creation_time,omitempty"`
+ // The network connectivity rules that apply to network traffic from your
+ // serverless compute resources.
+ EgressConfig *NccEgressConfig `json:"egress_config,omitempty"`
+ // The name of the network connectivity configuration. The name can contain
+ // alphanumeric characters, hyphens, and underscores. The length must be
+ // between 3 and 30 characters. The name must match the regular expression
+ // `^[0-9a-zA-Z-_]{3,30}$`.
+ Name string `json:"name,omitempty"`
+ // Databricks network connectivity configuration ID.
+ NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
+ // The region for the network connectivity configuration. Only workspaces in
+ // the same region can be attached to the network connectivity
+ // configuration.
+ Region string `json:"region,omitempty"`
+ // Time in epoch milliseconds when this object was updated.
+ UpdatedTime int64 `json:"updated_time,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NetworkConnectivityConfiguration) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s NetworkConnectivityConfiguration) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type NotificationDestination struct {
+ // The configuration for the notification destination. Will be exactly one
+ // of the nested configs. Only returned for users with workspace admin
+ // permissions.
+ Config *Config `json:"config,omitempty"`
+ // [Output-only] The type of the notification destination. The type cannot
+ // be changed once set.
+ DestinationType DestinationType `json:"destination_type,omitempty"`
+ // The display name for the notification destination.
+ DisplayName string `json:"display_name,omitempty"`
+ // UUID identifying notification destination.
+ Id string `json:"id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *NotificationDestination) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s NotificationDestination) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PagerdutyConfig struct {
+ // [Input-Only] Integration key for PagerDuty.
+ IntegrationKey string `json:"integration_key,omitempty"`
+ // [Output-Only] Whether integration key is set.
+ IntegrationKeySet bool `json:"integration_key_set,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PagerdutyConfig) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PagerdutyConfig) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Partition by workspace or account
+type PartitionId struct {
+ // The ID of the workspace.
+ WorkspaceId int64 `json:"workspaceId,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *PartitionId) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s PartitionId) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type PersonalComputeMessage struct {
+ // ON: Grants all users in all workspaces access to the Personal Compute
+ // default policy, allowing all users to create single-machine compute
+ // resources. DELEGATE: Moves access control for the Personal Compute
+ // default policy to individual workspaces and requires a workspace’s
+ // users or groups to be added to the ACLs of that workspace’s Personal
+ // Compute default policy before they will be able to create compute
+ // resources through that policy.
+ Value PersonalComputeMessageEnum `json:"value"`
+}
+
+// ON: Grants all users in all workspaces access to the Personal Compute default
+// policy, allowing all users to create single-machine compute resources.
+// DELEGATE: Moves access control for the Personal Compute default policy to
+// individual workspaces and requires a workspace’s users or groups to be
+// added to the ACLs of that workspace’s Personal Compute default policy
+// before they will be able to create compute resources through that policy.
+type PersonalComputeMessageEnum string
+
+const PersonalComputeMessageEnumDelegate PersonalComputeMessageEnum = `DELEGATE`
+
+const PersonalComputeMessageEnumOn PersonalComputeMessageEnum = `ON`
+
+// String representation for [fmt.Print]
+func (f *PersonalComputeMessageEnum) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *PersonalComputeMessageEnum) Set(v string) error {
+ switch v {
+ case `DELEGATE`, `ON`:
+ *f = PersonalComputeMessageEnum(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DELEGATE", "ON"`, v)
+ }
+}
+
+// Type always returns PersonalComputeMessageEnum to satisfy [pflag.Value] interface
+func (f *PersonalComputeMessageEnum) Type() string {
+ return "PersonalComputeMessageEnum"
+}
+
+type PersonalComputeSetting struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided.
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + + PersonalCompute PersonalComputeMessage `json:"personal_compute"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PersonalComputeSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PersonalComputeSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PublicTokenInfo struct { + // Comment the token was created with, if applicable. + Comment string `json:"comment,omitempty"` + // Server time (in epoch milliseconds) when the token was created. + CreationTime int64 `json:"creation_time,omitempty"` + // Server time (in epoch milliseconds) when the token will expire, or -1 if + // not applicable. + ExpiryTime int64 `json:"expiry_time,omitempty"` + // The ID of this token. + TokenId string `json:"token_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PublicTokenInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PublicTokenInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Details required to replace an IP access list. +type ReplaceIpAccessList struct { + // Specifies whether this IP access list is enabled. + Enabled bool `json:"enabled"` + // The ID for the corresponding IP access list + IpAccessListId string `json:"-" url:"-"` + + IpAddresses []string `json:"ip_addresses,omitempty"` + // Label for the IP access list. This **cannot** be empty. + Label string `json:"label"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. 
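+ //
+ // For example, building a replacement allow list; ListType's Set method
+ // (defined above in this file) validates the raw value first. The `listID`
+ // variable is illustrative:
+ //
+ //	var lt ListType
+ //	if err := lt.Set("ALLOW"); err != nil {
+ //		return err
+ //	}
+ //	req := ReplaceIpAccessList{
+ //		IpAccessListId: listID,
+ //		Label:          "corp-vpn",
+ //		Enabled:        true,
+ //		IpAddresses:    []string{"192.0.2.0/24"},
+ //		ListType:       lt,
+ //	}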
+ ListType ListType `json:"list_type"` +} + +type ReplaceResponse struct { +} + +type RestrictWorkspaceAdminsMessage struct { + Status RestrictWorkspaceAdminsMessageStatus `json:"status"` +} + +type RestrictWorkspaceAdminsMessageStatus string + +const RestrictWorkspaceAdminsMessageStatusAllowAll RestrictWorkspaceAdminsMessageStatus = `ALLOW_ALL` + +const RestrictWorkspaceAdminsMessageStatusRestrictTokensAndJobRunAs RestrictWorkspaceAdminsMessageStatus = `RESTRICT_TOKENS_AND_JOB_RUN_AS` + +// String representation for [fmt.Print] +func (f *RestrictWorkspaceAdminsMessageStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RestrictWorkspaceAdminsMessageStatus) Set(v string) error { + switch v { + case `ALLOW_ALL`, `RESTRICT_TOKENS_AND_JOB_RUN_AS`: + *f = RestrictWorkspaceAdminsMessageStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW_ALL", "RESTRICT_TOKENS_AND_JOB_RUN_AS"`, v) + } +} + +// Type always returns RestrictWorkspaceAdminsMessageStatus to satisfy [pflag.Value] interface +func (f *RestrictWorkspaceAdminsMessageStatus) Type() string { + return "RestrictWorkspaceAdminsMessageStatus" +} + +type RestrictWorkspaceAdminsSetting struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + + RestrictWorkspaceAdmins RestrictWorkspaceAdminsMessage `json:"restrict_workspace_admins"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RestrictWorkspaceAdminsSetting) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RestrictWorkspaceAdminsSetting) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RevokeTokenRequest struct { + // The ID of the token to be revoked. + TokenId string `json:"token_id"` +} + +type RevokeTokenResponse struct { +} + +type SetStatusResponse struct { +} + +type SlackConfig struct { + // [Input-Only] URL for Slack destination. + Url string `json:"url,omitempty"` + // [Output-Only] Whether URL is set. + UrlSet bool `json:"url_set,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SlackConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SlackConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StringMessage struct { + // Represents a generic string value. 
+ Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StringMessage) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StringMessage) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel TokenPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenAccessControlResponse struct { + // All permissions. + AllPermissions []TokenPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenInfo struct { + // Comment that describes the purpose of the token, specified by the token + // creator. + Comment string `json:"comment,omitempty"` + // User ID of the user that created the token. + CreatedById int64 `json:"created_by_id,omitempty"` + // Username of the user that created the token. + CreatedByUsername string `json:"created_by_username,omitempty"` + // Timestamp when the token was created. + CreationTime int64 `json:"creation_time,omitempty"` + // Timestamp when the token expires. + ExpiryTime int64 `json:"expiry_time,omitempty"` + // Approximate timestamp for the day the token was last used. Accurate up to + // 1 day. + LastUsedDay int64 `json:"last_used_day,omitempty"` + // User ID of the user that owns the token. + OwnerId int64 `json:"owner_id,omitempty"` + // ID of the token. + TokenId string `json:"token_id,omitempty"` + // If applicable, the ID of the workspace that the token was created in. 
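+ //
+ // For example, collecting tokens that have already expired from a list
+ // response; the `resp` value is assumed to come from a prior list call,
+ // and timestamps are taken to be epoch milliseconds as documented for
+ // PublicTokenInfo above:
+ //
+ //	now := time.Now().UnixMilli()
+ //	var expired []TokenInfo
+ //	for _, t := range resp.TokenInfos {
+ //		if t.ExpiryTime > 0 && t.ExpiryTime < now {
+ //			expired = append(expired, t)
+ //		}
+ //	}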
+ WorkspaceId int64 `json:"workspace_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel TokenPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type TokenPermissionLevel string + +const TokenPermissionLevelCanUse TokenPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *TokenPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TokenPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = TokenPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns TokenPermissionLevel to satisfy [pflag.Value] interface +func (f *TokenPermissionLevel) Type() string { + return "TokenPermissionLevel" +} + +type TokenPermissions struct { + AccessControlList []TokenAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel TokenPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TokenPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TokenPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TokenPermissionsRequest struct { + AccessControlList []TokenAccessControlRequest `json:"access_control_list,omitempty"` +} + +// The type of token request. As of now, only `AZURE_ACTIVE_DIRECTORY_TOKEN` is +// supported. 
+type TokenType string + +const TokenTypeArclightAzureExchangeToken TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN` + +const TokenTypeArclightAzureExchangeTokenWithUserDelegationKey TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY` + +const TokenTypeAzureActiveDirectoryToken TokenType = `AZURE_ACTIVE_DIRECTORY_TOKEN` + +// String representation for [fmt.Print] +func (f *TokenType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TokenType) Set(v string) error { + switch v { + case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: + *f = TokenType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) + } +} + +// Type always returns TokenType to satisfy [pflag.Value] interface +func (f *TokenType) Type() string { + return "TokenType" +} + +// Details required to update a setting. +type UpdateAccountIpAccessEnableRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting AccountIpAccessEnable `json:"setting"` +} + +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting AibiDashboardEmbeddingAccessPolicySetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). 
+ // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting AibiDashboardEmbeddingApprovedDomainsSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateAutomaticClusterUpdateSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting AutomaticClusterUpdateSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateComplianceSecurityProfileSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting ComplianceSecurityProfileSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateCspEnablementAccountSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. 
+ FieldMask string `json:"field_mask"` + + Setting CspEnablementAccountSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateDefaultNamespaceSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + // This represents the setting configuration for the default namespace in + // the Databricks workspace. Setting the default catalog for the workspace + // determines the catalog that is used when queries do not reference a fully + // qualified 3 level name. For example, if the default catalog is set to + // 'retail_prod' then a query 'SELECT * FROM myTable' would reference the + // object 'retail_prod.default.myTable' (the schema 'default' is always + // assumed). This setting requires a restart of clusters and SQL warehouses + // to take effect. Additionally, the default namespace only applies when + // using Unity Catalog-enabled compute. + Setting DefaultNamespaceSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateDisableLegacyAccessRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting DisableLegacyAccess `json:"setting"` +} + +// Details required to update a setting. +type UpdateDisableLegacyDbfsRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. 
It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting DisableLegacyDbfs `json:"setting"` +} + +// Details required to update a setting. +type UpdateDisableLegacyFeaturesRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting DisableLegacyFeatures `json:"setting"` +} + +// Details required to update a setting. +type UpdateEnhancedSecurityMonitoringSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnhancedSecurityMonitoringSetting `json:"setting"` +} + +// Details required to update a setting. +type UpdateEsmEnablementAccountSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EsmEnablementAccountSetting `json:"setting"` +} + +// Details required to update an IP access list. +type UpdateIpAccessList struct { + // Specifies whether this IP access list is enabled. 
+ Enabled bool `json:"enabled,omitempty"` + // The ID for the corresponding IP access list + IpAccessListId string `json:"-" url:"-"` + + IpAddresses []string `json:"ip_addresses,omitempty"` + // Label for the IP access list. This **cannot** be empty. + Label string `json:"label,omitempty"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `json:"list_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateIpAccessList) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateIpAccessList) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateNotificationDestinationRequest struct { + // The configuration for the notification destination. Must wrap EXACTLY one + // of the nested configs. + Config *Config `json:"config,omitempty"` + // The display name for the notification destination. + DisplayName string `json:"display_name,omitempty"` + // UUID identifying notification destination. + Id string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateNotificationDestinationRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateNotificationDestinationRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Details required to update a setting. +type UpdatePersonalComputeSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting PersonalComputeSetting `json:"setting"` +} + +type UpdateResponse struct { +} + +// Details required to update a setting. +type UpdateRestrictWorkspaceAdminsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. 
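+ //
+ // For example, naming the changed field explicitly rather than using `*`;
+ // the mask path shown illustrates the pattern, and the actual update call
+ // is omitted:
+ //
+ //	req := UpdateRestrictWorkspaceAdminsSettingRequest{
+ //		AllowMissing: true,
+ //		FieldMask:    "restrict_workspace_admins.status",
+ //		Setting: RestrictWorkspaceAdminsSetting{
+ //			RestrictWorkspaceAdmins: RestrictWorkspaceAdminsMessage{
+ //				Status: RestrictWorkspaceAdminsMessageStatusRestrictTokensAndJobRunAs,
+ //			},
+ //		},
+ //	}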
+ FieldMask string `json:"field_mask"` + + Setting RestrictWorkspaceAdminsSetting `json:"setting"` +} + +type WorkspaceConf map[string]string diff --git a/sharing/v2/model.go b/sharing/v2/model.go index bce3827a2..b0d2e4fac 100755 --- a/sharing/v2/model.go +++ b/sharing/v2/model.go @@ -420,11 +420,11 @@ func (f *PartitionValueOp) Type() string { type PermissionsChange struct { // The set of privileges to add. - Add []Privilege `json:"add,omitempty"` + Add []SharingPrivilege `json:"add,omitempty"` // The principal whose privileges we are changing. Principal string `json:"principal,omitempty"` // The set of privileges to remove. - Remove []Privilege `json:"remove,omitempty"` + Remove []SharingPrivilege `json:"remove,omitempty"` ForceSendFields []string `json:"-"` } @@ -439,7 +439,7 @@ func (s PermissionsChange) MarshalJSON() ([]byte, error) { type PermissionsList struct { // The privileges assigned to each principal - PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` + PrivilegeAssignments []SharingPrivilegeAssignment `json:"privilege_assignments,omitempty"` } type Privilege string @@ -1063,6 +1063,136 @@ func (f *SharedDataObjectUpdateAction) Type() string { return "SharedDataObjectUpdateAction" } +type SharingPrivilege string + +const SharingPrivilegeAccess SharingPrivilege = `ACCESS` + +const SharingPrivilegeAllPrivileges SharingPrivilege = `ALL_PRIVILEGES` + +const SharingPrivilegeApplyTag SharingPrivilege = `APPLY_TAG` + +const SharingPrivilegeCreate SharingPrivilege = `CREATE` + +const SharingPrivilegeCreateCatalog SharingPrivilege = `CREATE_CATALOG` + +const SharingPrivilegeCreateConnection SharingPrivilege = `CREATE_CONNECTION` + +const SharingPrivilegeCreateExternalLocation SharingPrivilege = `CREATE_EXTERNAL_LOCATION` + +const SharingPrivilegeCreateExternalTable SharingPrivilege = `CREATE_EXTERNAL_TABLE` + +const SharingPrivilegeCreateExternalVolume SharingPrivilege = `CREATE_EXTERNAL_VOLUME` + +const SharingPrivilegeCreateForeignCatalog SharingPrivilege = `CREATE_FOREIGN_CATALOG` + +const SharingPrivilegeCreateForeignSecurable SharingPrivilege = `CREATE_FOREIGN_SECURABLE` + +const SharingPrivilegeCreateFunction SharingPrivilege = `CREATE_FUNCTION` + +const SharingPrivilegeCreateManagedStorage SharingPrivilege = `CREATE_MANAGED_STORAGE` + +const SharingPrivilegeCreateMaterializedView SharingPrivilege = `CREATE_MATERIALIZED_VIEW` + +const SharingPrivilegeCreateModel SharingPrivilege = `CREATE_MODEL` + +const SharingPrivilegeCreateProvider SharingPrivilege = `CREATE_PROVIDER` + +const SharingPrivilegeCreateRecipient SharingPrivilege = `CREATE_RECIPIENT` + +const SharingPrivilegeCreateSchema SharingPrivilege = `CREATE_SCHEMA` + +const SharingPrivilegeCreateServiceCredential SharingPrivilege = `CREATE_SERVICE_CREDENTIAL` + +const SharingPrivilegeCreateShare SharingPrivilege = `CREATE_SHARE` + +const SharingPrivilegeCreateStorageCredential SharingPrivilege = `CREATE_STORAGE_CREDENTIAL` + +const SharingPrivilegeCreateTable SharingPrivilege = `CREATE_TABLE` + +const SharingPrivilegeCreateView SharingPrivilege = `CREATE_VIEW` + +const SharingPrivilegeCreateVolume SharingPrivilege = `CREATE_VOLUME` + +const SharingPrivilegeExecute SharingPrivilege = `EXECUTE` + +const SharingPrivilegeManage SharingPrivilege = `MANAGE` + +const SharingPrivilegeManageAllowlist SharingPrivilege = `MANAGE_ALLOWLIST` + +const SharingPrivilegeModify SharingPrivilege = `MODIFY` + +const SharingPrivilegeReadFiles SharingPrivilege = `READ_FILES` + +const 
SharingPrivilegeReadPrivateFiles SharingPrivilege = `READ_PRIVATE_FILES` + +const SharingPrivilegeReadVolume SharingPrivilege = `READ_VOLUME` + +const SharingPrivilegeRefresh SharingPrivilege = `REFRESH` + +const SharingPrivilegeSelect SharingPrivilege = `SELECT` + +const SharingPrivilegeSetSharePermission SharingPrivilege = `SET_SHARE_PERMISSION` + +const SharingPrivilegeUsage SharingPrivilege = `USAGE` + +const SharingPrivilegeUseCatalog SharingPrivilege = `USE_CATALOG` + +const SharingPrivilegeUseConnection SharingPrivilege = `USE_CONNECTION` + +const SharingPrivilegeUseMarketplaceAssets SharingPrivilege = `USE_MARKETPLACE_ASSETS` + +const SharingPrivilegeUseProvider SharingPrivilege = `USE_PROVIDER` + +const SharingPrivilegeUseRecipient SharingPrivilege = `USE_RECIPIENT` + +const SharingPrivilegeUseSchema SharingPrivilege = `USE_SCHEMA` + +const SharingPrivilegeUseShare SharingPrivilege = `USE_SHARE` + +const SharingPrivilegeWriteFiles SharingPrivilege = `WRITE_FILES` + +const SharingPrivilegeWritePrivateFiles SharingPrivilege = `WRITE_PRIVATE_FILES` + +const SharingPrivilegeWriteVolume SharingPrivilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *SharingPrivilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharingPrivilege) Set(v string) error { + switch v { + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + *f = SharingPrivilege(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + } +} + +// Type always returns SharingPrivilege to satisfy [pflag.Value] interface +func (f *SharingPrivilege) Type() string { + return "SharingPrivilege" +} + +type SharingPrivilegeAssignment struct { + // The principal (user email address or group name). + Principal string `json:"principal,omitempty"` + // The privileges assigned to the principal. 
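+	//
+	// A sketch of a typical assignment (principal and privilege choices are
+	// illustrative):
+	//
+	//	SharingPrivilegeAssignment{
+	//		Principal:  "data-consumers",
+	//		Privileges: []SharingPrivilege{SharingPrivilegeSelect, SharingPrivilegeUseSchema},
+	//	}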
+ Privileges []SharingPrivilege `json:"privileges,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SharingPrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SharingPrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type UpdatePermissionsResponse struct { } diff --git a/sharing/v2preview/api.go b/sharing/v2preview/api.go new file mode 100755 index 000000000..751bb01c1 --- /dev/null +++ b/sharing/v2preview/api.go @@ -0,0 +1,532 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Providers Preview, Recipient Activation Preview, Recipients Preview, Shares Preview, etc. +package sharingpreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type ProvidersPreviewInterface interface { + + // Create an auth provider. + // + // Creates a new authentication provider minimally based on a name and + // authentication type. The caller must be an admin on the metastore. + Create(ctx context.Context, request CreateProvider) (*ProviderInfo, error) + + // Delete a provider. + // + // Deletes an authentication provider, if the caller is a metastore admin or is + // the owner of the provider. + Delete(ctx context.Context, request DeleteProviderRequest) error + + // Delete a provider. + // + // Deletes an authentication provider, if the caller is a metastore admin or is + // the owner of the provider. + DeleteByName(ctx context.Context, name string) error + + // Get a provider. + // + // Gets a specific authentication provider. The caller must supply the name of + // the provider, and must either be a metastore admin or the owner of the + // provider. + Get(ctx context.Context, request GetProviderRequest) (*ProviderInfo, error) + + // Get a provider. + // + // Gets a specific authentication provider. The caller must supply the name of + // the provider, and must either be a metastore admin or the owner of the + // provider. + GetByName(ctx context.Context, name string) (*ProviderInfo, error) + + // List providers. + // + // Gets an array of available authentication providers. The caller must either + // be a metastore admin or the owner of the providers. Providers not owned by + // the caller are not included in the response. There is no guarantee of a + // specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] + + // List providers. + // + // Gets an array of available authentication providers. The caller must either + // be a metastore admin or the owner of the providers. Providers not owned by + // the caller are not included in the response. There is no guarantee of a + // specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) + + // ProviderInfoNameToMetastoreIdMap calls [ProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. + // + // Returns an error if there's more than one [ProviderInfo] with the same .Name. 
+ // + // Note: All [ProviderInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ProviderInfoNameToMetastoreIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) + + // List shares by Provider. + // + // Gets an array of a specified provider's shares within the metastore where: + // + // * the caller is a metastore admin, or * the caller is the owner. + // + // This method is generated by Databricks SDK Code Generator. + ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] + + // List shares by Provider. + // + // Gets an array of a specified provider's shares within the metastore where: + // + // * the caller is a metastore admin, or * the caller is the owner. + // + // This method is generated by Databricks SDK Code Generator. + ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) + + // List shares by Provider. + // + // Gets an array of a specified provider's shares within the metastore where: + // + // * the caller is a metastore admin, or * the caller is the owner. + ListSharesByName(ctx context.Context, name string) (*ListProviderSharesResponse, error) + + // Update a provider. + // + // Updates the information for an authentication provider, if the caller is a + // metastore admin or is the owner of the provider. If the update changes the + // provider name, the caller must be both a metastore admin and the owner of the + // provider. + Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) +} + +func NewProvidersPreview(client *client.DatabricksClient) *ProvidersPreviewAPI { + return &ProvidersPreviewAPI{ + providersPreviewImpl: providersPreviewImpl{ + client: client, + }, + } +} + +// A data provider is an object representing the organization in the real world +// who shares the data. A provider contains shares which further contain the +// shared data. +type ProvidersPreviewAPI struct { + providersPreviewImpl +} + +// Delete a provider. +// +// Deletes an authentication provider, if the caller is a metastore admin or is +// the owner of the provider. +func (a *ProvidersPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.providersPreviewImpl.Delete(ctx, DeleteProviderRequest{ + Name: name, + }) +} + +// Get a provider. +// +// Gets a specific authentication provider. The caller must supply the name of +// the provider, and must either be a metastore admin or the owner of the +// provider. +func (a *ProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { + return a.providersPreviewImpl.Get(ctx, GetProviderRequest{ + Name: name, + }) +} + +// ProviderInfoNameToMetastoreIdMap calls [ProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. +// +// Returns an error if there's more than one [ProviderInfo] with the same .Name. +// +// Note: All [ProviderInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
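+//
+// Usage sketch (assumes a configured *ProvidersPreviewAPI in a and a
+// context.Context in ctx):
+//
+//	ids, err := a.ProviderInfoNameToMetastoreIdMap(ctx, ListProvidersRequest{})
+//	if err != nil {
+//		return err // e.g. two providers sharing the same name
+//	}
+//	metastoreID := ids["some-provider"]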
+func (a *ProvidersPreviewAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.MetastoreId + } + return mapping, nil +} + +// List shares by Provider. +// +// Gets an array of a specified provider's shares within the metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. +func (a *ProvidersPreviewAPI) ListSharesByName(ctx context.Context, name string) (*ListProviderSharesResponse, error) { + return a.providersPreviewImpl.internalListShares(ctx, ListSharesRequest{ + Name: name, + }) +} + +type RecipientActivationPreviewInterface interface { + + // Get a share activation URL. + // + // Gets an activation URL for a share. + GetActivationUrlInfo(ctx context.Context, request GetActivationUrlInfoRequest) error + + // Get a share activation URL. + // + // Gets an activation URL for a share. + GetActivationUrlInfoByActivationUrl(ctx context.Context, activationUrl string) error + + // Get an access token. + // + // Retrieve access token with an activation url. This is a public API without + // any authentication. + RetrieveToken(ctx context.Context, request RetrieveTokenRequest) (*RetrieveTokenResponse, error) + + // Get an access token. + // + // Retrieve access token with an activation url. This is a public API without + // any authentication. + RetrieveTokenByActivationUrl(ctx context.Context, activationUrl string) (*RetrieveTokenResponse, error) +} + +func NewRecipientActivationPreview(client *client.DatabricksClient) *RecipientActivationPreviewAPI { + return &RecipientActivationPreviewAPI{ + recipientActivationPreviewImpl: recipientActivationPreviewImpl{ + client: client, + }, + } +} + +// The Recipient Activation API is only applicable in the open sharing model +// where the recipient object has the authentication type of `TOKEN`. The data +// recipient follows the activation link shared by the data provider to download +// the credential file that includes the access token. The recipient will then +// use the credential file to establish a secure connection with the provider to +// receive the shared data. +// +// Note that you can download the credential file only once. Recipients should +// treat the downloaded credential as a secret and must not share it outside of +// their organization. +type RecipientActivationPreviewAPI struct { + recipientActivationPreviewImpl +} + +// Get a share activation URL. +// +// Gets an activation URL for a share. +func (a *RecipientActivationPreviewAPI) GetActivationUrlInfoByActivationUrl(ctx context.Context, activationUrl string) error { + return a.recipientActivationPreviewImpl.GetActivationUrlInfo(ctx, GetActivationUrlInfoRequest{ + ActivationUrl: activationUrl, + }) +} + +// Get an access token. +// +// Retrieve access token with an activation url. This is a public API without +// any authentication. 
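+//
+// Usage sketch (the activation URL value is illustrative):
+//
+//	resp, err := a.RetrieveTokenByActivationUrl(ctx, "<activation-url>")
+//	if err != nil {
+//		return err
+//	}
+//	_ = resp // carries the retrieved token details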
+func (a *RecipientActivationPreviewAPI) RetrieveTokenByActivationUrl(ctx context.Context, activationUrl string) (*RetrieveTokenResponse, error) { + return a.recipientActivationPreviewImpl.RetrieveToken(ctx, RetrieveTokenRequest{ + ActivationUrl: activationUrl, + }) +} + +type RecipientsPreviewInterface interface { + + // Create a share recipient. + // + // Creates a new recipient with the delta sharing authentication type in the + // metastore. The caller must be a metastore admin or have the + // **CREATE_RECIPIENT** privilege on the metastore. + Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) + + // Delete a share recipient. + // + // Deletes the specified recipient from the metastore. The caller must be the + // owner of the recipient. + Delete(ctx context.Context, request DeleteRecipientRequest) error + + // Delete a share recipient. + // + // Deletes the specified recipient from the metastore. The caller must be the + // owner of the recipient. + DeleteByName(ctx context.Context, name string) error + + // Get a share recipient. + // + // Gets a share recipient from the metastore if: + // + // * the caller is the owner of the share recipient, or: * is a metastore admin + Get(ctx context.Context, request GetRecipientRequest) (*RecipientInfo, error) + + // Get a share recipient. + // + // Gets a share recipient from the metastore if: + // + // * the caller is the owner of the share recipient, or: * is a metastore admin + GetByName(ctx context.Context, name string) (*RecipientInfo, error) + + // List share recipients. + // + // Gets an array of all share recipients within the current metastore where: + // + // * the caller is a metastore admin, or * the caller is the owner. There is no + // guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] + + // List share recipients. + // + // Gets an array of all share recipients within the current metastore where: + // + // * the caller is a metastore admin, or * the caller is the owner. There is no + // guarantee of a specific ordering of the elements in the array. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) + + // Rotate a token. + // + // Refreshes the specified recipient's delta sharing authentication token with + // the provided token info. The caller must be the owner of the recipient. + RotateToken(ctx context.Context, request RotateRecipientToken) (*RecipientInfo, error) + + // Get recipient share permissions. + // + // Gets the share permissions for the specified Recipient. The caller must be a + // metastore admin or the owner of the Recipient. + SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetRecipientSharePermissionsResponse, error) + + // Get recipient share permissions. + // + // Gets the share permissions for the specified Recipient. The caller must be a + // metastore admin or the owner of the Recipient. + SharePermissionsByName(ctx context.Context, name string) (*GetRecipientSharePermissionsResponse, error) + + // Update a share recipient. + // + // Updates an existing recipient in the metastore. The caller must be a + // metastore admin or the owner of the recipient. If the recipient name will be + // updated, the user must be both a metastore admin and the owner of the + // recipient. 
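+	//
+	// For example (sketch; field values illustrative, UpdateRecipient as
+	// defined in this package):
+	//
+	//	updated, err := a.Update(ctx, UpdateRecipient{
+	//		Name:    "my-recipient",
+	//		Comment: "quarterly data consumer",
+	//	})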
+	Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error)
+}
+
+func NewRecipientsPreview(client *client.DatabricksClient) *RecipientsPreviewAPI {
+	return &RecipientsPreviewAPI{
+		recipientsPreviewImpl: recipientsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// A recipient is an object you create using :method:recipients/create to
+// represent an organization to which you want to grant access to shares. How
+// sharing works differs depending on whether your recipient has access to a
+// Databricks workspace that is enabled for Unity Catalog:
+//
+// - For recipients with access to a Databricks workspace that is enabled for
+// Unity Catalog, you can create a recipient object along with a unique sharing
+// identifier you get from the recipient. The sharing identifier is the key
+// identifier that enables the secure connection. This sharing mode is called
+// **Databricks-to-Databricks sharing**.
+//
+// - For recipients without access to a Databricks workspace that is enabled for
+// Unity Catalog, when you create a recipient object, Databricks generates an
+// activation link you can send to the recipient. The recipient follows the
+// activation link to download the credential file, and then uses the credential
+// file to establish a secure connection to receive the shared data. This
+// sharing mode is called **open sharing**.
+type RecipientsPreviewAPI struct {
+	recipientsPreviewImpl
+}
+
+// Delete a share recipient.
+//
+// Deletes the specified recipient from the metastore. The caller must be the
+// owner of the recipient.
+func (a *RecipientsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.recipientsPreviewImpl.Delete(ctx, DeleteRecipientRequest{
+		Name: name,
+	})
+}
+
+// Get a share recipient.
+//
+// Gets a share recipient from the metastore if:
+//
+// * the caller is the owner of the share recipient, or: * is a metastore admin
+func (a *RecipientsPreviewAPI) GetByName(ctx context.Context, name string) (*RecipientInfo, error) {
+	return a.recipientsPreviewImpl.Get(ctx, GetRecipientRequest{
+		Name: name,
+	})
+}
+
+// Get recipient share permissions.
+//
+// Gets the share permissions for the specified Recipient. The caller must be a
+// metastore admin or the owner of the Recipient.
+func (a *RecipientsPreviewAPI) SharePermissionsByName(ctx context.Context, name string) (*GetRecipientSharePermissionsResponse, error) {
+	return a.recipientsPreviewImpl.SharePermissions(ctx, SharePermissionsRequest{
+		Name: name,
+	})
+}
+
+type SharesPreviewInterface interface {
+
+	// Create a share.
+	//
+	// Creates a new share for data objects. Data objects can be added after
+	// creation with **update**. The caller must be a metastore admin or have the
+	// **CREATE_SHARE** privilege on the metastore.
+	Create(ctx context.Context, request CreateShare) (*ShareInfo, error)
+
+	// Delete a share.
+	//
+	// Deletes a data object share from the metastore. The caller must be an owner
+	// of the share.
+	Delete(ctx context.Context, request DeleteShareRequest) error
+
+	// Delete a share.
+	//
+	// Deletes a data object share from the metastore. The caller must be an owner
+	// of the share.
+	DeleteByName(ctx context.Context, name string) error
+
+	// Get a share.
+	//
+	// Gets a data object share from the metastore. The caller must be a metastore
+	// admin or the owner of the share.
+	Get(ctx context.Context, request GetShareRequest) (*ShareInfo, error)
+
+	// Get a share.
+	//
+	// Gets a data object share from the metastore. The caller must be a metastore
+	// admin or the owner of the share.
+	GetByName(ctx context.Context, name string) (*ShareInfo, error)
+
+	// List shares.
+	//
+	// Gets an array of data object shares from the metastore. The caller must be a
+	// metastore admin or the owner of the share. There is no guarantee of a
+	// specific ordering of the elements in the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo]
+
+	// List shares.
+	//
+	// Gets an array of data object shares from the metastore. The caller must be a
+	// metastore admin or the owner of the share. There is no guarantee of a
+	// specific ordering of the elements in the array.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error)
+
+	// Get permissions.
+	//
+	// Gets the permissions for a data share from the metastore. The caller must be
+	// a metastore admin or the owner of the share.
+	SharePermissions(ctx context.Context, request SharePermissionsRequest) (*PermissionsList, error)
+
+	// Get permissions.
+	//
+	// Gets the permissions for a data share from the metastore. The caller must be
+	// a metastore admin or the owner of the share.
+	SharePermissionsByName(ctx context.Context, name string) (*PermissionsList, error)
+
+	// Update a share.
+	//
+	// Updates the share with the changes and data objects in the request. The
+	// caller must be the owner of the share or a metastore admin.
+	//
+	// When the caller is a metastore admin, only the __owner__ field can be
+	// updated.
+	//
+	// In the case that the share name is changed, **updateShare** requires that the
+	// caller is both the share owner and a metastore admin.
+	//
+	// If there are notebook files in the share, the __storage_root__ field cannot
+	// be updated.
+	//
+	// For each table that is added through this method, the share owner must also
+	// have **SELECT** privilege on the table. This privilege must be maintained
+	// indefinitely for recipients to be able to access the table. Typically, you
+	// should use a group as the share owner.
+	//
+	// Table removals through **update** do not require additional privileges.
+	Update(ctx context.Context, request UpdateShare) (*ShareInfo, error)
+
+	// Update permissions.
+	//
+	// Updates the permissions for a data share in the metastore. The caller must be
+	// a metastore admin or an owner of the share.
+	//
+	// For new recipient grants, the user must also be the owner of the recipients.
+	// Recipient revocations do not require additional privileges.
+	UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error
+}
+
+func NewSharesPreview(client *client.DatabricksClient) *SharesPreviewAPI {
+	return &SharesPreviewAPI{
+		sharesPreviewImpl: sharesPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// A share is a container instantiated with :method:shares/create. Once created,
+// you can iteratively register a collection of existing data assets defined
+// within the metastore using :method:shares/update. You can register data
+// assets under their original name, qualified by their original schema, or
+// provide alternate exposed names.
+type SharesPreviewAPI struct {
+	sharesPreviewImpl
+}
+
+// Delete a share.
+//
+// Deletes a data object share from the metastore. The caller must be an owner
+// of the share.
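+//
+// Usage sketch (share name illustrative):
+//
+//	if err := a.DeleteByName(ctx, "my_share"); err != nil {
+//		return err
+//	}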
+func (a *SharesPreviewAPI) DeleteByName(ctx context.Context, name string) error { + return a.sharesPreviewImpl.Delete(ctx, DeleteShareRequest{ + Name: name, + }) +} + +// Get a share. +// +// Gets a data object share from the metastore. The caller must be a metastore +// admin or the owner of the share. +func (a *SharesPreviewAPI) GetByName(ctx context.Context, name string) (*ShareInfo, error) { + return a.sharesPreviewImpl.Get(ctx, GetShareRequest{ + Name: name, + }) +} + +// Get permissions. +// +// Gets the permissions for a data share from the metastore. The caller must be +// a metastore admin or the owner of the share. +func (a *SharesPreviewAPI) SharePermissionsByName(ctx context.Context, name string) (*PermissionsList, error) { + return a.sharesPreviewImpl.SharePermissions(ctx, SharePermissionsRequest{ + Name: name, + }) +} diff --git a/sharing/v2preview/client.go b/sharing/v2preview/client.go new file mode 100755 index 000000000..6fd9bc330 --- /dev/null +++ b/sharing/v2preview/client.go @@ -0,0 +1,147 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package sharingpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type ProvidersPreviewClient struct { + ProvidersPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewProvidersPreviewClient(cfg *config.Config) (*ProvidersPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ProvidersPreviewClient{ + Config: cfg, + apiClient: apiClient, + ProvidersPreviewInterface: NewProvidersPreview(databricksClient), + }, nil +} + +type RecipientActivationPreviewClient struct { + RecipientActivationPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewRecipientActivationPreviewClient(cfg *config.Config) (*RecipientActivationPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &RecipientActivationPreviewClient{ + Config: cfg, + apiClient: apiClient, + RecipientActivationPreviewInterface: NewRecipientActivationPreview(databricksClient), + }, nil +} + +type RecipientsPreviewClient struct { + RecipientsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewRecipientsPreviewClient(cfg *config.Config) (*RecipientsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid 
workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &RecipientsPreviewClient{ + Config: cfg, + apiClient: apiClient, + RecipientsPreviewInterface: NewRecipientsPreview(databricksClient), + }, nil +} + +type SharesPreviewClient struct { + SharesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSharesPreviewClient(cfg *config.Config) (*SharesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SharesPreviewClient{ + Config: cfg, + apiClient: apiClient, + SharesPreviewInterface: NewSharesPreview(databricksClient), + }, nil +} diff --git a/sharing/v2preview/impl.go b/sharing/v2preview/impl.go new file mode 100755 index 000000000..eecd64f1e --- /dev/null +++ b/sharing/v2preview/impl.go @@ -0,0 +1,424 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package sharingpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" + "golang.org/x/exp/slices" +) + +// unexported type that holds implementations of just ProvidersPreview API methods +type providersPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *providersPreviewImpl) Create(ctx context.Context, request CreateProvider) (*ProviderInfo, error) { + var providerInfo ProviderInfo + path := "/api/2.1preview/unity-catalog/providers" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &providerInfo) + return &providerInfo, err +} + +func (a *providersPreviewImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *providersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*ProviderInfo, error) { + var providerInfo ProviderInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &providerInfo) + return &providerInfo, err +} + +// List providers. +// +// Gets an array of available authentication providers. The caller must either +// be a metastore admin or the owner of the providers. 
Providers not owned by +// the caller are not included in the response. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *providersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// Gets an array of available authentication providers. The caller must either +// be a metastore admin or the owner of the providers. Providers not owned by +// the caller are not included in the response. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *providersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ProviderInfo](ctx, iterator) +} +func (a *providersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { + var listProvidersResponse ListProvidersResponse + path := "/api/2.1preview/unity-catalog/providers" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse) + return &listProvidersResponse, err +} + +// List shares by Provider. +// +// Gets an array of a specified provider's shares within the metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. +func (a *providersPreviewImpl) ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] { + + getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListProviderSharesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListShares(ctx, req) + } + getItems := func(resp *ListProviderSharesResponse) []ProviderShare { + return resp.Shares + } + getNextReq := func(resp *ListProviderSharesResponse) *ListSharesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List shares by Provider. +// +// Gets an array of a specified provider's shares within the metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. 
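+//
+// Pagination sketch using the returned iterator (provider name illustrative;
+// HasNext and Next are the listing.Iterator methods):
+//
+//	it := a.ListShares(ctx, ListSharesRequest{Name: "some-provider"})
+//	for it.HasNext(ctx) {
+//		share, err := it.Next(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(share.Name)
+//	}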
+func (a *providersPreviewImpl) ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) { + iterator := a.ListShares(ctx, request) + return listing.ToSlice[ProviderShare](ctx, iterator) +} +func (a *providersPreviewImpl) internalListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { + var listProviderSharesResponse ListProviderSharesResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v/shares", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProviderSharesResponse) + return &listProviderSharesResponse, err +} + +func (a *providersPreviewImpl) Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) { + var providerInfo ProviderInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &providerInfo) + return &providerInfo, err +} + +// unexported type that holds implementations of just RecipientActivationPreview API methods +type recipientActivationPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *recipientActivationPreviewImpl) GetActivationUrlInfo(ctx context.Context, request GetActivationUrlInfoRequest) error { + var getActivationUrlInfoResponse GetActivationUrlInfoResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/public/data_sharing_activation_info/%v", request.ActivationUrl) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getActivationUrlInfoResponse) + return err +} + +func (a *recipientActivationPreviewImpl) RetrieveToken(ctx context.Context, request RetrieveTokenRequest) (*RetrieveTokenResponse, error) { + var retrieveTokenResponse RetrieveTokenResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/public/data_sharing_activation/%v", request.ActivationUrl) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &retrieveTokenResponse) + return &retrieveTokenResponse, err +} + +// unexported type that holds implementations of just RecipientsPreview API methods +type recipientsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *recipientsPreviewImpl) Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) { + var recipientInfo RecipientInfo + path := "/api/2.1preview/unity-catalog/recipients" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &recipientInfo) + return &recipientInfo, err +} + +func (a *recipientsPreviewImpl) Delete(ctx context.Context, request DeleteRecipientRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + 
headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *recipientsPreviewImpl) Get(ctx context.Context, request GetRecipientRequest) (*RecipientInfo, error) { + var recipientInfo RecipientInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &recipientInfo) + return &recipientInfo, err +} + +// List share recipients. +// +// Gets an array of all share recipients within the current metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. There is no +// guarantee of a specific ordering of the elements in the array. +func (a *recipientsPreviewImpl) List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] { + + getNextPage := func(ctx context.Context, req ListRecipientsRequest) (*ListRecipientsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListRecipientsResponse) []RecipientInfo { + return resp.Recipients + } + getNextReq := func(resp *ListRecipientsResponse) *ListRecipientsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List share recipients. +// +// Gets an array of all share recipients within the current metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. There is no +// guarantee of a specific ordering of the elements in the array. 
+func (a *recipientsPreviewImpl) ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RecipientInfo](ctx, iterator) +} +func (a *recipientsPreviewImpl) internalList(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { + var listRecipientsResponse ListRecipientsResponse + path := "/api/2.1preview/unity-catalog/recipients" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRecipientsResponse) + return &listRecipientsResponse, err +} + +func (a *recipientsPreviewImpl) RotateToken(ctx context.Context, request RotateRecipientToken) (*RecipientInfo, error) { + var recipientInfo RecipientInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v/rotate-token", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &recipientInfo) + return &recipientInfo, err +} + +func (a *recipientsPreviewImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetRecipientSharePermissionsResponse, error) { + var getRecipientSharePermissionsResponse GetRecipientSharePermissionsResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v/share-permissions", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRecipientSharePermissionsResponse) + return &getRecipientSharePermissionsResponse, err +} + +func (a *recipientsPreviewImpl) Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) { + var recipientInfo RecipientInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &recipientInfo) + return &recipientInfo, err +} + +// unexported type that holds implementations of just SharesPreview API methods +type sharesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *sharesPreviewImpl) Create(ctx context.Context, request CreateShare) (*ShareInfo, error) { + var shareInfo ShareInfo + path := "/api/2.1preview/unity-catalog/shares" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &shareInfo) + return &shareInfo, err +} + +func (a *sharesPreviewImpl) Delete(ctx context.Context, request DeleteShareRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *sharesPreviewImpl) Get(ctx context.Context, request GetShareRequest) (*ShareInfo, 
error) { + var shareInfo ShareInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &shareInfo) + return &shareInfo, err +} + +// List shares. +// +// Gets an array of data object shares from the metastore. The caller must be a +// metastore admin or the owner of the share. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *sharesPreviewImpl) List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo] { + + getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListSharesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSharesResponse) []ShareInfo { + return resp.Shares + } + getNextReq := func(resp *ListSharesResponse) *ListSharesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List shares. +// +// Gets an array of data object shares from the metastore. The caller must be a +// metastore admin or the owner of the share. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *sharesPreviewImpl) ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ShareInfo](ctx, iterator) +} +func (a *sharesPreviewImpl) internalList(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { + var listSharesResponse ListSharesResponse + path := "/api/2.1preview/unity-catalog/shares" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSharesResponse) + return &listSharesResponse, err +} + +func (a *sharesPreviewImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*PermissionsList, error) { + var permissionsList PermissionsList + path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v/permissions", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionsList) + return &permissionsList, err +} + +func (a *sharesPreviewImpl) Update(ctx context.Context, request UpdateShare) (*ShareInfo, error) { + var shareInfo ShareInfo + path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &shareInfo) + return &shareInfo, err +} + +func (a *sharesPreviewImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error { + var updatePermissionsResponse UpdatePermissionsResponse + path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v/permissions", request.Name) + queryParams := make(map[string]any) + if request.MaxResults != 0 || slices.Contains(request.ForceSendFields, "MaxResults") { + 
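+		// Only serialize max_results when it is non-zero or explicitly listed in
+		// ForceSendFields, so an unset zero value is not sent as a query parameter.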
queryParams["max_results"] = request.MaxResults + } + if request.PageToken != "" || slices.Contains(request.ForceSendFields, "PageToken") { + queryParams["page_token"] = request.PageToken + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updatePermissionsResponse) + return err +} diff --git a/sharing/v2preview/model.go b/sharing/v2preview/model.go new file mode 100755 index 000000000..8007814c8 --- /dev/null +++ b/sharing/v2preview/model.go @@ -0,0 +1,1305 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package sharingpreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +// The delta sharing authentication type. +type AuthenticationType string + +const AuthenticationTypeDatabricks AuthenticationType = `DATABRICKS` + +const AuthenticationTypeToken AuthenticationType = `TOKEN` + +// String representation for [fmt.Print] +func (f *AuthenticationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AuthenticationType) Set(v string) error { + switch v { + case `DATABRICKS`, `TOKEN`: + *f = AuthenticationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATABRICKS", "TOKEN"`, v) + } +} + +// Type always returns AuthenticationType to satisfy [pflag.Value] interface +func (f *AuthenticationType) Type() string { + return "AuthenticationType" +} + +type CreateProvider struct { + // The delta sharing authentication type. + AuthenticationType AuthenticationType `json:"authentication_type"` + // Description about the provider. + Comment string `json:"comment,omitempty"` + // The name of the Provider. + Name string `json:"name"` + // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. + RecipientProfileStr string `json:"recipient_profile_str,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateProvider) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateProvider) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateRecipient struct { + // The delta sharing authentication type. + AuthenticationType AuthenticationType `json:"authentication_type"` + // Description about the recipient. + Comment string `json:"comment,omitempty"` + // The global Unity Catalog metastore id provided by the data recipient. + // This field is only present when the __authentication_type__ is + // **DATABRICKS**. The identifier is of format + // __cloud__:__region__:__metastore-uuid__. + DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` + // Expiration timestamp of the token, in epoch milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // IP Access List + IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` + // Name of Recipient. + Name string `json:"name"` + // Username of the recipient owner. + Owner string `json:"owner,omitempty"` + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. 
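+	//
+	// Read-modify-write sketch (names from this package; nil checks elided):
+	//
+	//	cur, err := a.GetByName(ctx, "my-recipient")
+	//	if err != nil {
+	//		return err
+	//	}
+	//	props := cur.PropertiesKvpairs.Properties
+	//	props["country"] = "US" // add or change one key
+	//	_, err = a.Update(ctx, UpdateRecipient{
+	//		Name:              "my-recipient",
+	//		PropertiesKvpairs: &SecurablePropertiesKvPairs{Properties: props},
+	//	})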
+ PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` + // The one-time sharing code provided by the data recipient. This field is + // only present when the __authentication_type__ is **DATABRICKS**. + SharingCode string `json:"sharing_code,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateRecipient) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateRecipient) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateShare struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Name of the share. + Name string `json:"name"` + // Storage root URL for the share. + StorageRoot string `json:"storage_root,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateShare) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateShare) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete a provider +type DeleteProviderRequest struct { + // Name of the provider. + Name string `json:"-" url:"-"` +} + +// Delete a share recipient +type DeleteRecipientRequest struct { + // Name of the recipient. + Name string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a share +type DeleteShareRequest struct { + // The name of the share. + Name string `json:"-" url:"-"` +} + +// Get a share activation URL +type GetActivationUrlInfoRequest struct { + // The one time activation url. It also accepts activation token. + ActivationUrl string `json:"-" url:"-"` +} + +type GetActivationUrlInfoResponse struct { +} + +// Get a provider +type GetProviderRequest struct { + // Name of the provider. + Name string `json:"-" url:"-"` +} + +// Get a share recipient +type GetRecipientRequest struct { + // Name of the recipient. + Name string `json:"-" url:"-"` +} + +type GetRecipientSharePermissionsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of data share permissions for a recipient. + PermissionsOut []ShareToPrivilegeAssignment `json:"permissions_out,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetRecipientSharePermissionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetRecipientSharePermissionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get a share +type GetShareRequest struct { + // Query for data to include in the share. + IncludeSharedData bool `json:"-" url:"include_shared_data,omitempty"` + // The name of the share. + Name string `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetShareRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetShareRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type IpAccessList struct { + // Allowed IP Addresses in CIDR notation. Limit of 100. + AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"` +} + +type ListProviderSharesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). 
+ NextPageToken string `json:"next_page_token,omitempty"` + // An array of provider shares. + Shares []ProviderShare `json:"shares,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProviderSharesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProviderSharesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List providers +type ListProvidersRequest struct { + // If not provided, all providers will be returned. If no providers exist + // with this ID, no results will be returned. + DataProviderGlobalMetastoreId string `json:"-" url:"data_provider_global_metastore_id,omitempty"` + // Maximum number of providers to return. - when set to 0, the page length + // is set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid providers are returned (not + // recommended). - Note: The number of returned providers might be less than + // the specified max_results size, even zero. The only definitive indication + // that no further providers can be fetched is when the next_page_token is + // unset from the response. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProvidersRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProvidersRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListProvidersResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of provider information objects. + Providers []ProviderInfo `json:"providers,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListProvidersResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProvidersResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List share recipients +type ListRecipientsRequest struct { + // If not provided, all recipients will be returned. If no recipients exist + // with this ID, no results will be returned. + DataRecipientGlobalMetastoreId string `json:"-" url:"data_recipient_global_metastore_id,omitempty"` + // Maximum number of recipients to return. - when set to 0, the page length + // is set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid recipients are returned (not + // recommended). - Note: The number of returned recipients might be less + // than the specified max_results size, even zero. The only definitive + // indication that no further recipients can be fetched is when the + // next_page_token is unset from the response. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. 
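+ // A hedged pagination sketch (comment only; assumes a low-level client
+ // whose List returns *ListRecipientsResponse) showing the
+ // max_results/page_token contract described above:
+ //
+ //	req := ListRecipientsRequest{MaxResults: 100}
+ //	for {
+ //		resp, err := recipients.List(ctx, req)
+ //		if err != nil {
+ //			return err
+ //		}
+ //		for _, r := range resp.Recipients {
+ //			process(r) // hypothetical per-recipient handler
+ //		}
+ //		if resp.NextPageToken == "" {
+ //			break // no further pages
+ //		}
+ //		req.PageToken = resp.NextPageToken
+ //	}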
+ PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRecipientsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRecipientsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListRecipientsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of recipient information objects. + Recipients []RecipientInfo `json:"recipients,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListRecipientsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListRecipientsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List shares by Provider +type ListSharesRequest struct { + // Maximum number of shares to return. - when set to 0, the page length is + // set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid shares are returned (not + // recommended). - Note: The number of returned shares might be less than + // the specified max_results size, even zero. The only definitive indication + // that no further shares can be fetched is when the next_page_token is + // unset from the response. + MaxResults int `json:"-" url:"max_results,omitempty"` + // Name of the provider in which to list shares. + Name string `json:"-" url:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSharesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSharesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListSharesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // An array of data share information objects. + Shares []ShareInfo `json:"shares,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListSharesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListSharesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Partition struct { + // An array of partition values. + Values []PartitionValue `json:"values,omitempty"` +} + +type PartitionValue struct { + // The name of the partition column. + Name string `json:"name,omitempty"` + // The operator to apply for the value. + Op PartitionValueOp `json:"op,omitempty"` + // The key of a Delta Sharing recipient's property. For example + // "databricks-account-id". When this field is set, field `value` can not be + // set. + RecipientPropertyKey string `json:"recipient_property_key,omitempty"` + // The value of the partition column. When this value is not set, it means + // `null` value. When this field is set, field `recipient_property_key` can + // not be set. 
+ Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PartitionValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PartitionValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PartitionValueOp string + +const PartitionValueOpEqual PartitionValueOp = `EQUAL` + +const PartitionValueOpLike PartitionValueOp = `LIKE` + +// String representation for [fmt.Print] +func (f *PartitionValueOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PartitionValueOp) Set(v string) error { + switch v { + case `EQUAL`, `LIKE`: + *f = PartitionValueOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "LIKE"`, v) + } +} + +// Type always returns PartitionValueOp to satisfy [pflag.Value] interface +func (f *PartitionValueOp) Type() string { + return "PartitionValueOp" +} + +type PermissionsChange struct { + // The set of privileges to add. + Add []SharingPrivilege `json:"add,omitempty"` + // The principal whose privileges we are changing. + Principal string `json:"principal,omitempty"` + // The set of privileges to remove. + Remove []SharingPrivilege `json:"remove,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PermissionsChange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionsChange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PermissionsList struct { + // The privileges assigned to each principal + PrivilegeAssignments []SharingPrivilegeAssignment `json:"privilege_assignments,omitempty"` +} + +type Privilege string + +const PrivilegeAccess Privilege = `ACCESS` + +const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` + +const PrivilegeApplyTag Privilege = `APPLY_TAG` + +const PrivilegeCreate Privilege = `CREATE` + +const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` + +const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` + +const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` + +const PrivilegeCreateExternalTable Privilege = `CREATE_EXTERNAL_TABLE` + +const PrivilegeCreateExternalVolume Privilege = `CREATE_EXTERNAL_VOLUME` + +const PrivilegeCreateForeignCatalog Privilege = `CREATE_FOREIGN_CATALOG` + +const PrivilegeCreateForeignSecurable Privilege = `CREATE_FOREIGN_SECURABLE` + +const PrivilegeCreateFunction Privilege = `CREATE_FUNCTION` + +const PrivilegeCreateManagedStorage Privilege = `CREATE_MANAGED_STORAGE` + +const PrivilegeCreateMaterializedView Privilege = `CREATE_MATERIALIZED_VIEW` + +const PrivilegeCreateModel Privilege = `CREATE_MODEL` + +const PrivilegeCreateProvider Privilege = `CREATE_PROVIDER` + +const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` + +const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` + +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + +const PrivilegeCreateShare Privilege = `CREATE_SHARE` + +const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` + +const PrivilegeCreateTable Privilege = `CREATE_TABLE` + +const PrivilegeCreateView Privilege = `CREATE_VIEW` + +const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` + +const PrivilegeExecute Privilege = `EXECUTE` + +const PrivilegeManage Privilege = `MANAGE` + +const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` + +const PrivilegeModify Privilege = `MODIFY` + +const PrivilegeReadFiles Privilege = `READ_FILES` + +const 
PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` + +const PrivilegeReadVolume Privilege = `READ_VOLUME` + +const PrivilegeRefresh Privilege = `REFRESH` + +const PrivilegeSelect Privilege = `SELECT` + +const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` + +const PrivilegeUsage Privilege = `USAGE` + +const PrivilegeUseCatalog Privilege = `USE_CATALOG` + +const PrivilegeUseConnection Privilege = `USE_CONNECTION` + +const PrivilegeUseMarketplaceAssets Privilege = `USE_MARKETPLACE_ASSETS` + +const PrivilegeUseProvider Privilege = `USE_PROVIDER` + +const PrivilegeUseRecipient Privilege = `USE_RECIPIENT` + +const PrivilegeUseSchema Privilege = `USE_SCHEMA` + +const PrivilegeUseShare Privilege = `USE_SHARE` + +const PrivilegeWriteFiles Privilege = `WRITE_FILES` + +const PrivilegeWritePrivateFiles Privilege = `WRITE_PRIVATE_FILES` + +const PrivilegeWriteVolume Privilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *Privilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Privilege) Set(v string) error { + switch v { + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + *f = Privilege(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + } +} + +// Type always returns Privilege to satisfy [pflag.Value] interface +func (f *Privilege) Type() string { + return "Privilege" +} + +type PrivilegeAssignment struct { + // The principal (user email address or group name). + Principal string `json:"principal,omitempty"` + // The privileges assigned to the principal. 
+ Privileges []Privilege `json:"privileges,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ProviderInfo struct { + // The delta sharing authentication type. + AuthenticationType AuthenticationType `json:"authentication_type,omitempty"` + // Cloud vendor of the provider's UC metastore. This field is only present + // when the __authentication_type__ is **DATABRICKS**. + Cloud string `json:"cloud,omitempty"` + // Description about the provider. + Comment string `json:"comment,omitempty"` + // Time at which this Provider was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of Provider creator. + CreatedBy string `json:"created_by,omitempty"` + // The global UC metastore id of the data provider. This field is only + // present when the __authentication_type__ is **DATABRICKS**. The + // identifier is of format __cloud__:__region__:__metastore-uuid__. + DataProviderGlobalMetastoreId string `json:"data_provider_global_metastore_id,omitempty"` + // UUID of the provider's UC metastore. This field is only present when the + // __authentication_type__ is **DATABRICKS**. + MetastoreId string `json:"metastore_id,omitempty"` + // The name of the Provider. + Name string `json:"name,omitempty"` + // Username of Provider owner. + Owner string `json:"owner,omitempty"` + // The recipient profile. This field is only present when the + // authentication_type is `TOKEN` or `OAUTH_CLIENT_CREDENTIALS`. + RecipientProfile *RecipientProfile `json:"recipient_profile,omitempty"` + // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. + RecipientProfileStr string `json:"recipient_profile_str,omitempty"` + // Cloud region of the provider's UC metastore. This field is only present + // when the __authentication_type__ is **DATABRICKS**. + Region string `json:"region,omitempty"` + // Time at which this Provider was last modified, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of user who last modified Provider. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ProviderInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ProviderInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ProviderShare struct { + // The name of the Provider Share. + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ProviderShare) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ProviderShare) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RecipientInfo struct { + // A boolean status field showing whether the Recipient's activation URL has + // been exercised or not. + Activated bool `json:"activated,omitempty"` + // Full activation url to retrieve the access token. It will be empty if the + // token is already retrieved. + ActivationUrl string `json:"activation_url,omitempty"` + // The delta sharing authentication type. + AuthenticationType AuthenticationType `json:"authentication_type,omitempty"` + // Cloud vendor of the recipient's Unity Catalog Metastore. This field is + // only present when the __authentication_type__ is **DATABRICKS**.
+ Cloud string `json:"cloud,omitempty"` + // Description about the recipient. + Comment string `json:"comment,omitempty"` + // Time at which this recipient was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of recipient creator. + CreatedBy string `json:"created_by,omitempty"` + // The global Unity Catalog metastore id provided by the data recipient. + // This field is only present when the __authentication_type__ is + // **DATABRICKS**. The identifier is of format + // __cloud__:__region__:__metastore-uuid__. + DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` + // Expiration timestamp of the token, in epoch milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // IP Access List + IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` + // Unique identifier of recipient's Unity Catalog Metastore. This field is + // only present when the __authentication_type__ is **DATABRICKS**. + MetastoreId string `json:"metastore_id,omitempty"` + // Name of Recipient. + Name string `json:"name,omitempty"` + // Username of the recipient owner. + Owner string `json:"owner,omitempty"` + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. + PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` + // Cloud region of the recipient's Unity Catalog Metastore. This field is + // only present when the __authentication_type__ is **DATABRICKS**. + Region string `json:"region,omitempty"` + // The one-time sharing code provided by the data recipient. This field is + // only present when the __authentication_type__ is **DATABRICKS**. + SharingCode string `json:"sharing_code,omitempty"` + // This field is only present when the __authentication_type__ is **TOKEN**. + Tokens []RecipientTokenInfo `json:"tokens,omitempty"` + // Time at which the recipient was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of recipient updater. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RecipientInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RecipientInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RecipientProfile struct { + // The token used to authorize the recipient. + BearerToken string `json:"bearer_token,omitempty"` + // The endpoint for the share to be used by the recipient. + Endpoint string `json:"endpoint,omitempty"` + // The version number of the recipient's credentials on a share. + ShareCredentialsVersion int `json:"share_credentials_version,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RecipientProfile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RecipientProfile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RecipientTokenInfo struct { + // Full activation URL to retrieve the access token. It will be empty if the + // token is already retrieved. + ActivationUrl string `json:"activation_url,omitempty"` + // Time at which this recipient token was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of recipient token creator. 
+ CreatedBy string `json:"created_by,omitempty"` + // Expiration timestamp of the token in epoch milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // Unique ID of the recipient token. + Id string `json:"id,omitempty"` + // Time at which this recipient token was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of recipient token updater. + UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RecipientTokenInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RecipientTokenInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get an access token +type RetrieveTokenRequest struct { + // The one time activation url. It also accepts activation token. + ActivationUrl string `json:"-" url:"-"` +} + +type RetrieveTokenResponse struct { + // The token used to authorize the recipient. + BearerToken string `json:"bearerToken,omitempty"` + // The endpoint for the share to be used by the recipient. + Endpoint string `json:"endpoint,omitempty"` + // Expiration timestamp of the token in epoch milliseconds. + ExpirationTime string `json:"expirationTime,omitempty"` + // These field names must follow the delta sharing protocol. + ShareCredentialsVersion int `json:"shareCredentialsVersion,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RetrieveTokenResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RetrieveTokenResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RotateRecipientToken struct { + // The expiration time of the bearer token in ISO 8601 format. This will set + // the expiration_time of existing token only to a smaller timestamp, it + // cannot extend the expiration_time. Use 0 to expire the existing token + // immediately, negative number will return an error. + ExistingTokenExpireInSeconds int64 `json:"existing_token_expire_in_seconds"` + // The name of the Recipient. + Name string `json:"-" url:"-"` +} + +// An object with __properties__ containing map of key-value properties attached +// to the securable. +type SecurablePropertiesKvPairs struct { + // A map of key-value properties attached to the securable. + Properties map[string]string `json:"properties"` +} + +type ShareInfo struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // Time at which this share was created, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of share creator. + CreatedBy string `json:"created_by,omitempty"` + // Name of the share. + Name string `json:"name,omitempty"` + // A list of shared data objects within the share. + Objects []SharedDataObject `json:"objects,omitempty"` + // Username of current owner of share. + Owner string `json:"owner,omitempty"` + // Storage Location URL (full path) for the share. + StorageLocation string `json:"storage_location,omitempty"` + // Storage root URL for the share. + StorageRoot string `json:"storage_root,omitempty"` + // Time at which this share was updated, in epoch milliseconds. + UpdatedAt int64 `json:"updated_at,omitempty"` + // Username of share updater. 
+ UpdatedBy string `json:"updated_by,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ShareInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ShareInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get recipient share permissions +type SharePermissionsRequest struct { + // Maximum number of permissions to return. - when set to 0, the page length + // is set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid permissions are returned (not + // recommended). - Note: The number of returned permissions might be less + // than the specified max_results size, even zero. The only definitive + // indication that no further permissions can be fetched is when the + // next_page_token is unset from the response. + MaxResults int `json:"-" url:"max_results,omitempty"` + // The name of the Recipient. + Name string `json:"-" url:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SharePermissionsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SharePermissionsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ShareToPrivilegeAssignment struct { + // The privileges assigned to the principal. + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` + // The share name. + ShareName string `json:"share_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ShareToPrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ShareToPrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SharedDataObject struct { + // The time when this data object is added to the share, in epoch + // milliseconds. + AddedAt int64 `json:"added_at,omitempty"` + // Username of the sharer. + AddedBy string `json:"added_by,omitempty"` + // Whether to enable cdf or indicate if cdf is enabled on the shared object. + CdfEnabled bool `json:"cdf_enabled,omitempty"` + // A user-provided comment when adding the data object to the share. + // [Update:OPT] + Comment string `json:"comment,omitempty"` + // The content of the notebook file when the data object type is + // NOTEBOOK_FILE. This should be base64 encoded. Required for adding a + // NOTEBOOK_FILE, optional for updating, ignored for other types. + Content string `json:"content,omitempty"` + // The type of the data object. + DataObjectType SharedDataObjectDataObjectType `json:"data_object_type,omitempty"` + // Whether to enable or disable sharing of data history. If not specified, + // the default is **DISABLED**. + HistoryDataSharingStatus SharedDataObjectHistoryDataSharingStatus `json:"history_data_sharing_status,omitempty"` + // A fully qualified name that uniquely identifies a data object. + // + // For example, a table's fully qualified name is in the format of + // `<catalog>.<schema>.<table>`. + Name string `json:"name"` + // Array of partitions for the shared data. + Partitions []Partition `json:"partitions,omitempty"` + // A user-provided new name for the data object within the share.
If this + // new name is not provided, the object's original name will be used as the + // `shared_as` name. The `shared_as` name must be unique within a share. For + // tables, the new name must follow the format of `<schema>.<table>
`. + SharedAs string `json:"shared_as,omitempty"` + // The start version associated with the object. This allows data providers + // to control the lowest object version that is accessible by clients. If + // specified, clients can query snapshots or changes for versions >= + // start_version. If not specified, clients can only query starting from the + // version of the object at the time it was added to the share. + // + // NOTE: The start_version should be <= the `current` version of the object. + StartVersion int64 `json:"start_version,omitempty"` + // One of: **ACTIVE**, **PERMISSION_DENIED**. + Status SharedDataObjectStatus `json:"status,omitempty"` + // A user-provided new name for the data object within the share. If this + // new name is not provided, the object's original name will be used as the + // `string_shared_as` name. The `string_shared_as` name must be unique + // within a share. For notebooks, the new name should be the new notebook + // file name. + StringSharedAs string `json:"string_shared_as,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SharedDataObject) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SharedDataObject) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of the data object. +type SharedDataObjectDataObjectType string + +const SharedDataObjectDataObjectTypeFeatureSpec SharedDataObjectDataObjectType = `FEATURE_SPEC` + +const SharedDataObjectDataObjectTypeFunction SharedDataObjectDataObjectType = `FUNCTION` + +const SharedDataObjectDataObjectTypeMaterializedView SharedDataObjectDataObjectType = `MATERIALIZED_VIEW` + +const SharedDataObjectDataObjectTypeModel SharedDataObjectDataObjectType = `MODEL` + +const SharedDataObjectDataObjectTypeNotebookFile SharedDataObjectDataObjectType = `NOTEBOOK_FILE` + +const SharedDataObjectDataObjectTypeSchema SharedDataObjectDataObjectType = `SCHEMA` + +const SharedDataObjectDataObjectTypeStreamingTable SharedDataObjectDataObjectType = `STREAMING_TABLE` + +const SharedDataObjectDataObjectTypeTable SharedDataObjectDataObjectType = `TABLE` + +const SharedDataObjectDataObjectTypeView SharedDataObjectDataObjectType = `VIEW` + +// String representation for [fmt.Print] +func (f *SharedDataObjectDataObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectDataObjectType) Set(v string) error { + switch v { + case `FEATURE_SPEC`, `FUNCTION`, `MATERIALIZED_VIEW`, `MODEL`, `NOTEBOOK_FILE`, `SCHEMA`, `STREAMING_TABLE`, `TABLE`, `VIEW`: + *f = SharedDataObjectDataObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FEATURE_SPEC", "FUNCTION", "MATERIALIZED_VIEW", "MODEL", "NOTEBOOK_FILE", "SCHEMA", "STREAMING_TABLE", "TABLE", "VIEW"`, v) + } +} + +// Type always returns SharedDataObjectDataObjectType to satisfy [pflag.Value] interface +func (f *SharedDataObjectDataObjectType) Type() string { + return "SharedDataObjectDataObjectType" +} + +// Whether to enable or disable sharing of data history. If not specified, the +// default is **DISABLED**. 
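+// A hedged illustration (comment only) of the Set/String/Type contract these
+// string enums implement for pflag-style flag parsing:
+//
+//	var status SharedDataObjectHistoryDataSharingStatus
+//	if err := status.Set("ENABLED"); err != nil {
+//		// not reached: "ENABLED" is an allowed value
+//	}
+//	_ = status.String() // "ENABLED"
+//	err := status.Set("BOGUS") // err: value "BOGUS" is not one of "DISABLED", "ENABLED"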
+type SharedDataObjectHistoryDataSharingStatus string + +const SharedDataObjectHistoryDataSharingStatusDisabled SharedDataObjectHistoryDataSharingStatus = `DISABLED` + +const SharedDataObjectHistoryDataSharingStatusEnabled SharedDataObjectHistoryDataSharingStatus = `ENABLED` + +// String representation for [fmt.Print] +func (f *SharedDataObjectHistoryDataSharingStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectHistoryDataSharingStatus) Set(v string) error { + switch v { + case `DISABLED`, `ENABLED`: + *f = SharedDataObjectHistoryDataSharingStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISABLED", "ENABLED"`, v) + } +} + +// Type always returns SharedDataObjectHistoryDataSharingStatus to satisfy [pflag.Value] interface +func (f *SharedDataObjectHistoryDataSharingStatus) Type() string { + return "SharedDataObjectHistoryDataSharingStatus" +} + +// One of: **ACTIVE**, **PERMISSION_DENIED**. +type SharedDataObjectStatus string + +const SharedDataObjectStatusActive SharedDataObjectStatus = `ACTIVE` + +const SharedDataObjectStatusPermissionDenied SharedDataObjectStatus = `PERMISSION_DENIED` + +// String representation for [fmt.Print] +func (f *SharedDataObjectStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectStatus) Set(v string) error { + switch v { + case `ACTIVE`, `PERMISSION_DENIED`: + *f = SharedDataObjectStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "PERMISSION_DENIED"`, v) + } +} + +// Type always returns SharedDataObjectStatus to satisfy [pflag.Value] interface +func (f *SharedDataObjectStatus) Type() string { + return "SharedDataObjectStatus" +} + +type SharedDataObjectUpdate struct { + // One of: **ADD**, **REMOVE**, **UPDATE**. + Action SharedDataObjectUpdateAction `json:"action,omitempty"` + // The data object that is being added, removed, or updated. + DataObject *SharedDataObject `json:"data_object,omitempty"` +} + +// One of: **ADD**, **REMOVE**, **UPDATE**. 
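+// A hedged sketch of driving these actions through UpdateShare (the `shares`
+// client value, its error-only Update signature, and the object name are
+// hypothetical):
+//
+//	err := shares.Update(ctx, UpdateShare{
+//		Name: "my_share",
+//		Updates: []SharedDataObjectUpdate{{
+//			Action: SharedDataObjectUpdateActionAdd,
+//			DataObject: &SharedDataObject{
+//				Name: "main.default.trips", // `<catalog>.<schema>.<table>`
+//			},
+//		}},
+//	})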
+type SharedDataObjectUpdateAction string + +const SharedDataObjectUpdateActionAdd SharedDataObjectUpdateAction = `ADD` + +const SharedDataObjectUpdateActionRemove SharedDataObjectUpdateAction = `REMOVE` + +const SharedDataObjectUpdateActionUpdate SharedDataObjectUpdateAction = `UPDATE` + +// String representation for [fmt.Print] +func (f *SharedDataObjectUpdateAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectUpdateAction) Set(v string) error { + switch v { + case `ADD`, `REMOVE`, `UPDATE`: + *f = SharedDataObjectUpdateAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ADD", "REMOVE", "UPDATE"`, v) + } +} + +// Type always returns SharedDataObjectUpdateAction to satisfy [pflag.Value] interface +func (f *SharedDataObjectUpdateAction) Type() string { + return "SharedDataObjectUpdateAction" +} + +type SharingPrivilege string + +const SharingPrivilegeAccess SharingPrivilege = `ACCESS` + +const SharingPrivilegeAllPrivileges SharingPrivilege = `ALL_PRIVILEGES` + +const SharingPrivilegeApplyTag SharingPrivilege = `APPLY_TAG` + +const SharingPrivilegeCreate SharingPrivilege = `CREATE` + +const SharingPrivilegeCreateCatalog SharingPrivilege = `CREATE_CATALOG` + +const SharingPrivilegeCreateConnection SharingPrivilege = `CREATE_CONNECTION` + +const SharingPrivilegeCreateExternalLocation SharingPrivilege = `CREATE_EXTERNAL_LOCATION` + +const SharingPrivilegeCreateExternalTable SharingPrivilege = `CREATE_EXTERNAL_TABLE` + +const SharingPrivilegeCreateExternalVolume SharingPrivilege = `CREATE_EXTERNAL_VOLUME` + +const SharingPrivilegeCreateForeignCatalog SharingPrivilege = `CREATE_FOREIGN_CATALOG` + +const SharingPrivilegeCreateForeignSecurable SharingPrivilege = `CREATE_FOREIGN_SECURABLE` + +const SharingPrivilegeCreateFunction SharingPrivilege = `CREATE_FUNCTION` + +const SharingPrivilegeCreateManagedStorage SharingPrivilege = `CREATE_MANAGED_STORAGE` + +const SharingPrivilegeCreateMaterializedView SharingPrivilege = `CREATE_MATERIALIZED_VIEW` + +const SharingPrivilegeCreateModel SharingPrivilege = `CREATE_MODEL` + +const SharingPrivilegeCreateProvider SharingPrivilege = `CREATE_PROVIDER` + +const SharingPrivilegeCreateRecipient SharingPrivilege = `CREATE_RECIPIENT` + +const SharingPrivilegeCreateSchema SharingPrivilege = `CREATE_SCHEMA` + +const SharingPrivilegeCreateServiceCredential SharingPrivilege = `CREATE_SERVICE_CREDENTIAL` + +const SharingPrivilegeCreateShare SharingPrivilege = `CREATE_SHARE` + +const SharingPrivilegeCreateStorageCredential SharingPrivilege = `CREATE_STORAGE_CREDENTIAL` + +const SharingPrivilegeCreateTable SharingPrivilege = `CREATE_TABLE` + +const SharingPrivilegeCreateView SharingPrivilege = `CREATE_VIEW` + +const SharingPrivilegeCreateVolume SharingPrivilege = `CREATE_VOLUME` + +const SharingPrivilegeExecute SharingPrivilege = `EXECUTE` + +const SharingPrivilegeManage SharingPrivilege = `MANAGE` + +const SharingPrivilegeManageAllowlist SharingPrivilege = `MANAGE_ALLOWLIST` + +const SharingPrivilegeModify SharingPrivilege = `MODIFY` + +const SharingPrivilegeReadFiles SharingPrivilege = `READ_FILES` + +const SharingPrivilegeReadPrivateFiles SharingPrivilege = `READ_PRIVATE_FILES` + +const SharingPrivilegeReadVolume SharingPrivilege = `READ_VOLUME` + +const SharingPrivilegeRefresh SharingPrivilege = `REFRESH` + +const SharingPrivilegeSelect SharingPrivilege = `SELECT` + +const SharingPrivilegeSetSharePermission SharingPrivilege = `SET_SHARE_PERMISSION` + 
+const SharingPrivilegeUsage SharingPrivilege = `USAGE` + +const SharingPrivilegeUseCatalog SharingPrivilege = `USE_CATALOG` + +const SharingPrivilegeUseConnection SharingPrivilege = `USE_CONNECTION` + +const SharingPrivilegeUseMarketplaceAssets SharingPrivilege = `USE_MARKETPLACE_ASSETS` + +const SharingPrivilegeUseProvider SharingPrivilege = `USE_PROVIDER` + +const SharingPrivilegeUseRecipient SharingPrivilege = `USE_RECIPIENT` + +const SharingPrivilegeUseSchema SharingPrivilege = `USE_SCHEMA` + +const SharingPrivilegeUseShare SharingPrivilege = `USE_SHARE` + +const SharingPrivilegeWriteFiles SharingPrivilege = `WRITE_FILES` + +const SharingPrivilegeWritePrivateFiles SharingPrivilege = `WRITE_PRIVATE_FILES` + +const SharingPrivilegeWriteVolume SharingPrivilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *SharingPrivilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharingPrivilege) Set(v string) error { + switch v { + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + *f = SharingPrivilege(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + } +} + +// Type always returns SharingPrivilege to satisfy [pflag.Value] interface +func (f *SharingPrivilege) Type() string { + return "SharingPrivilege" +} + +type SharingPrivilegeAssignment struct { + // The principal (user email address or group name). + Principal string `json:"principal,omitempty"` + // The privileges assigned to the principal. 
+ Privileges []SharingPrivilege `json:"privileges,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SharingPrivilegeAssignment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SharingPrivilegeAssignment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdatePermissionsResponse struct { +} + +type UpdateProvider struct { + // Description about the provider. + Comment string `json:"comment,omitempty"` + // Name of the provider. + Name string `json:"-" url:"-"` + // New name for the provider. + NewName string `json:"new_name,omitempty"` + // Username of Provider owner. + Owner string `json:"owner,omitempty"` + // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. + RecipientProfileStr string `json:"recipient_profile_str,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateProvider) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateProvider) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateRecipient struct { + // Description about the recipient. + Comment string `json:"comment,omitempty"` + // Expiration timestamp of the token, in epoch milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + // IP Access List + IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` + // Name of the recipient. + Name string `json:"-" url:"-"` + // New name for the recipient. + NewName string `json:"new_name,omitempty"` + // Username of the recipient owner. + Owner string `json:"owner,omitempty"` + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. + PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateRecipient) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateRecipient) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateShare struct { + // User-provided free-form text description. + Comment string `json:"comment,omitempty"` + // The name of the share. + Name string `json:"-" url:"-"` + // New name for the share. + NewName string `json:"new_name,omitempty"` + // Username of current owner of share. + Owner string `json:"owner,omitempty"` + // Storage root URL for the share. + StorageRoot string `json:"storage_root,omitempty"` + // Array of shared data object updates. + Updates []SharedDataObjectUpdate `json:"updates,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateShare) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateShare) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateSharePermissions struct { + // Array of permission changes. + Changes []PermissionsChange `json:"changes,omitempty"` + // Maximum number of permissions to return. - when set to 0, the page length + // is set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid permissions are returned (not + // recommended).
- Note: The number of returned permissions might be less + // than the specified max_results size, even zero. The only definitive + // indication that no further permissions can be fetched is when the + // next_page_token is unset from the response. + MaxResults int `json:"-" url:"max_results,omitempty"` + // The name of the share. + Name string `json:"-" url:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateSharePermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateSharePermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/sql/v2preview/api.go b/sql/v2preview/api.go new file mode 100755 index 000000000..94ebafd2e --- /dev/null +++ b/sql/v2preview/api.go @@ -0,0 +1,1842 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Alerts Legacy Preview, Alerts Preview, Dashboard Widgets Preview, Dashboards Preview, Data Sources Preview, Dbsql Permissions Preview, Queries Legacy Preview, Queries Preview, Query History Preview, Query Visualizations Legacy Preview, Query Visualizations Preview, Redash Config Preview, Statement Execution Preview, Warehouses Preview, etc. +package sqlpreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type AlertsLegacyPreviewInterface interface { + + // Create an alert. + // + // Creates an alert. An alert is a Databricks SQL object that periodically runs + // a query, evaluates a condition of its result, and notifies users or + // notification destinations if the condition was met. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/create instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) + + // Delete an alert. + // + // Deletes an alert. Deleted alerts are no longer accessible and cannot be + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + // the trash. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error + + // Delete an alert. + // + // Deletes an alert. Deleted alerts are no longer accessible and cannot be + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + // the trash. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + DeleteByAlertId(ctx context.Context, alertId string) error + + // Get an alert. + // + // Gets an alert. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/get instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) + + // Get an alert. 
+ // + // Gets an alert. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/get instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) + + // Get alerts. + // + // Gets a list of alerts. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/list instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + List(ctx context.Context) ([]LegacyAlert, error) + + // LegacyAlertNameToIdMap calls [AlertsLegacyPreviewAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. + // + // Returns an error if there's more than one [LegacyAlert] with the same .Name. + // + // Note: All [LegacyAlert] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) + + // GetByName calls [AlertsLegacyPreviewAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. + // + // Returns an error if there's more than one [LegacyAlert] with the same .Name. + // + // Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*LegacyAlert, error) + + // Update an alert. + // + // Updates an alert. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/update instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Update(ctx context.Context, request EditAlert) error +} + +func NewAlertsLegacyPreview(client *client.DatabricksClient) *AlertsLegacyPreviewAPI { + return &AlertsLegacyPreviewAPI{ + alertsLegacyPreviewImpl: alertsLegacyPreviewImpl{ + client: client, + }, + } +} + +// The alerts API can be used to perform CRUD operations on alerts. An alert is +// a Databricks SQL object that periodically runs a query, evaluates a condition +// of its result, and notifies one or more users and/or notification +// destinations if the condition was met. Alerts can be scheduled using the +// `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// see the latest version. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +type AlertsLegacyPreviewAPI struct { + alertsLegacyPreviewImpl +} + +// Delete an alert. +// +// Deletes an alert. Deleted alerts are no longer accessible and cannot be +// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to +// the trash. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:alerts/delete instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *AlertsLegacyPreviewAPI) DeleteByAlertId(ctx context.Context, alertId string) error { + return a.alertsLegacyPreviewImpl.Delete(ctx, DeleteAlertsLegacyRequest{ + AlertId: alertId, + }) +} + +// Get an alert. +// +// Gets an alert. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:alerts/get instead. 
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *AlertsLegacyPreviewAPI) GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) { + return a.alertsLegacyPreviewImpl.Get(ctx, GetAlertsLegacyRequest{ + AlertId: alertId, + }) +} + +// LegacyAlertNameToIdMap calls [AlertsLegacyPreviewAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. +// +// Returns an error if there's more than one [LegacyAlert] with the same .Name. +// +// Note: All [LegacyAlert] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsLegacyPreviewAPI) LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [AlertsLegacyPreviewAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. +// +// Returns an error if there's more than one [LegacyAlert] with the same .Name. +// +// Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsLegacyPreviewAPI) GetByName(ctx context.Context, name string) (*LegacyAlert, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]LegacyAlert{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("LegacyAlert named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of LegacyAlert named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type AlertsPreviewInterface interface { + + // Create an alert. + // + // Creates an alert. + Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from + // searches and list views, and can no longer trigger. You can restore a trashed + // alert through the UI. A trashed alert is permanently deleted after 30 days. + Delete(ctx context.Context, request TrashAlertRequest) error + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from + // searches and list views, and can no longer trigger. You can restore a trashed + // alert through the UI. A trashed alert is permanently deleted after 30 days. + DeleteById(ctx context.Context, id string) error + + // Get an alert. + // + // Gets an alert. + Get(ctx context.Context, request GetAlertRequest) (*Alert, error) + + // Get an alert. + // + // Gets an alert. + GetById(ctx context.Context, id string) (*Alert, error) + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. 
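+ // A hedged iteration sketch (assuming an AlertsPreviewInterface value
+ // `api`; HasNext/Next follow the listing.Iterator contract):
+ //
+ //	it := api.List(ctx, ListAlertsRequest{})
+ //	for it.HasNext(ctx) {
+ //		alert, err := it.Next(ctx)
+ //		if err != nil {
+ //			return err
+ //		}
+ //		process(alert) // hypothetical per-alert handler
+ //	}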
+ List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) + + // ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsPreviewAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. + // + // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) + + // GetByDisplayName calls [AlertsPreviewAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. + // + // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) + + // Update an alert. + // + // Updates an alert. + Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) +} + +func NewAlertsPreview(client *client.DatabricksClient) *AlertsPreviewAPI { + return &AlertsPreviewAPI{ + alertsPreviewImpl: alertsPreviewImpl{ + client: client, + }, + } +} + +// The alerts API can be used to perform CRUD operations on alerts. An alert is +// a Databricks SQL object that periodically runs a query, evaluates a condition +// of its result, and notifies one or more users and/or notification +// destinations if the condition was met. Alerts can be scheduled using the +// `sql_task` type of the Jobs API, e.g. :method:jobs/create. +type AlertsPreviewAPI struct { + alertsPreviewImpl +} + +// Delete an alert. +// +// Moves an alert to the trash. Trashed alerts immediately disappear from +// searches and list views, and can no longer trigger. You can restore a trashed +// alert through the UI. A trashed alert is permanently deleted after 30 days. +func (a *AlertsPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.alertsPreviewImpl.Delete(ctx, TrashAlertRequest{ + Id: id, + }) +} + +// Get an alert. +// +// Gets an alert. +func (a *AlertsPreviewAPI) GetById(ctx context.Context, id string) (*Alert, error) { + return a.alertsPreviewImpl.Get(ctx, GetAlertRequest{ + Id: id, + }) +} + +// ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsPreviewAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. +// +// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. +// +// Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. 
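+// A hedged lookup sketch (the alert display name is hypothetical):
+//
+//	byName, err := api.ListAlertsResponseAlertDisplayNameToIdMap(ctx, ListAlertsRequest{})
+//	if err == nil {
+//		id := byName["cpu-usage-high"] // alert ID, provided the display name is unique
+//		_ = id
+//	}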
+func (a *AlertsPreviewAPI) ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [AlertsPreviewAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. +// +// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. +// +// Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListAlertsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ListAlertsResponseAlert{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ListAlertsResponseAlert named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ListAlertsResponseAlert named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type DashboardWidgetsPreviewInterface interface { + + // Add widget to a dashboard. + Create(ctx context.Context, request CreateWidget) (*Widget, error) + + // Remove widget. + Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error + + // Remove widget. + DeleteById(ctx context.Context, id string) error + + // Update existing widget. + Update(ctx context.Context, request CreateWidget) (*Widget, error) +} + +func NewDashboardWidgetsPreview(client *client.DatabricksClient) *DashboardWidgetsPreviewAPI { + return &DashboardWidgetsPreviewAPI{ + dashboardWidgetsPreviewImpl: dashboardWidgetsPreviewImpl{ + client: client, + }, + } +} + +// This is an evolving API that facilitates the addition and removal of widgets +// from existing dashboards within the Databricks Workspace. Data structures may +// change over time. +type DashboardWidgetsPreviewAPI struct { + dashboardWidgetsPreviewImpl +} + +// Remove widget. +func (a *DashboardWidgetsPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.dashboardWidgetsPreviewImpl.Delete(ctx, DeleteDashboardWidgetRequest{ + Id: id, + }) +} + +type DashboardsPreviewInterface interface { + + // Create a dashboard object. + Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) + + // Remove a dashboard. + // + // Moves a dashboard to the trash. Trashed dashboards do not appear in list + // views or searches, and cannot be shared. + Delete(ctx context.Context, request DeleteDashboardRequest) error + + // Remove a dashboard. + // + // Moves a dashboard to the trash. Trashed dashboards do not appear in list + // views or searches, and cannot be shared. + DeleteByDashboardId(ctx context.Context, dashboardId string) error + + // Retrieve a definition. 
+	//
+	// Returns a JSON representation of a dashboard object, including its
+	// visualization and query objects.
+	Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error)
+
+	// Retrieve a definition.
+	//
+	// Returns a JSON representation of a dashboard object, including its
+	// visualization and query objects.
+	GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error)
+
+	// Get dashboard objects.
+	//
+	// Fetch a paginated list of dashboard objects.
+	//
+	// **Warning**: Calling this API concurrently 10 or more times could result in
+	// throttling, service degradation, or a temporary ban.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard]
+
+	// Get dashboard objects.
+	//
+	// Fetch a paginated list of dashboard objects.
+	//
+	// **Warning**: Calling this API concurrently 10 or more times could result in
+	// throttling, service degradation, or a temporary ban.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error)
+
+	// DashboardNameToIdMap calls [DashboardsPreviewAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value.
+	//
+	// Returns an error if there's more than one [Dashboard] with the same .Name.
+	//
+	// Note: All [Dashboard] instances are loaded into memory before creating a map.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error)
+
+	// GetByName calls [DashboardsPreviewAPI.DashboardNameToIdMap] and returns a single [Dashboard].
+	//
+	// Returns an error if there's more than one [Dashboard] with the same .Name.
+	//
+	// Note: All [Dashboard] instances are loaded into memory before returning matching by name.
+	//
+	// This method is generated by Databricks SDK Code Generator.
+	GetByName(ctx context.Context, name string) (*Dashboard, error)
+
+	// Restore a dashboard.
+	//
+	// A restored dashboard appears in list views and searches and can be shared.
+	Restore(ctx context.Context, request RestoreDashboardRequest) error
+
+	// Change a dashboard definition.
+	//
+	// Modify this dashboard definition. This operation only affects attributes of
+	// the dashboard object. It does not add, modify, or remove widgets.
+	//
+	// **Note**: You cannot undo this operation.
+	Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error)
+}
+
+func NewDashboardsPreview(client *client.DatabricksClient) *DashboardsPreviewAPI {
+	return &DashboardsPreviewAPI{
+		dashboardsPreviewImpl: dashboardsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// In general, there is little need to modify dashboards using the API. However,
+// it can be useful to use dashboard objects to look up a collection of related
+// query IDs. The API can also be used to duplicate multiple dashboards at once
+// since you can get a dashboard definition with a GET request and then POST it
+// to create a new one. Dashboards can be scheduled using the `sql_task` type of
+// the Jobs API, e.g. :method:jobs/create.
+type DashboardsPreviewAPI struct {
+	dashboardsPreviewImpl
+}
+
+// Remove a dashboard.
+//
+// Moves a dashboard to the trash. Trashed dashboards do not appear in list
+// views or searches, and cannot be shared.
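+//
+// A hedged usage sketch, given `d`, a *DashboardsPreviewAPI; the dashboard ID
+// is illustrative, and a trashed dashboard can later be brought back with
+// Restore:
+//
+//	if err := d.DeleteByDashboardId(ctx, "abe9ab12"); err != nil {
+//		return err
+//	}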
+func (a *DashboardsPreviewAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error { + return a.dashboardsPreviewImpl.Delete(ctx, DeleteDashboardRequest{ + DashboardId: dashboardId, + }) +} + +// Retrieve a definition. +// +// Returns a JSON representation of a dashboard object, including its +// visualization and query objects. +func (a *DashboardsPreviewAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { + return a.dashboardsPreviewImpl.Get(ctx, GetDashboardRequest{ + DashboardId: dashboardId, + }) +} + +// DashboardNameToIdMap calls [DashboardsPreviewAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. +// +// Returns an error if there's more than one [Dashboard] with the same .Name. +// +// Note: All [Dashboard] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *DashboardsPreviewAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [DashboardsPreviewAPI.DashboardNameToIdMap] and returns a single [Dashboard]. +// +// Returns an error if there's more than one [Dashboard] with the same .Name. +// +// Note: All [Dashboard] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *DashboardsPreviewAPI) GetByName(ctx context.Context, name string) (*Dashboard, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListDashboardsRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]Dashboard{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("Dashboard named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of Dashboard named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type DataSourcesPreviewInterface interface { + + // Get a list of SQL warehouses. + // + // Retrieves a full list of SQL warehouses available in this workspace. All + // fields that appear in this API response are enumerated for clarity. However, + // you need only a SQL warehouse's `id` to create new queries against it. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:warehouses/list instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + List(ctx context.Context) ([]DataSource, error) + + // DataSourceNameToIdMap calls [DataSourcesPreviewAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. + // + // Returns an error if there's more than one [DataSource] with the same .Name. + // + // Note: All [DataSource] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
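+	//
+	// A hedged lookup sketch, given `ds` satisfying this interface; the
+	// warehouse name is illustrative, and the resulting value is what query
+	// objects expect as their `data_source_id`:
+	//
+	//	ids, err := ds.DataSourceNameToIdMap(ctx)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	dataSourceId := ids["Shared SQL Warehouse"]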
+ DataSourceNameToIdMap(ctx context.Context) (map[string]string, error) + + // GetByName calls [DataSourcesPreviewAPI.DataSourceNameToIdMap] and returns a single [DataSource]. + // + // Returns an error if there's more than one [DataSource] with the same .Name. + // + // Note: All [DataSource] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*DataSource, error) +} + +func NewDataSourcesPreview(client *client.DatabricksClient) *DataSourcesPreviewAPI { + return &DataSourcesPreviewAPI{ + dataSourcesPreviewImpl: dataSourcesPreviewImpl{ + client: client, + }, + } +} + +// This API is provided to assist you in making new query objects. When creating +// a query object, you may optionally specify a `data_source_id` for the SQL +// warehouse against which it will run. If you don't already know the +// `data_source_id` for your desired SQL warehouse, this API will help you find +// it. +// +// This API does not support searches. It returns the full list of SQL +// warehouses in your workspace. We advise you to use any text editor, REST +// client, or `grep` to search the response from this API for the name of your +// SQL warehouse as it appears in Databricks SQL. +// +// **Note**: A new version of the Databricks SQL API is now available. [Learn +// more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +type DataSourcesPreviewAPI struct { + dataSourcesPreviewImpl +} + +// DataSourceNameToIdMap calls [DataSourcesPreviewAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. +// +// Returns an error if there's more than one [DataSource] with the same .Name. +// +// Note: All [DataSource] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *DataSourcesPreviewAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.List(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [DataSourcesPreviewAPI.DataSourceNameToIdMap] and returns a single [DataSource]. +// +// Returns an error if there's more than one [DataSource] with the same .Name. +// +// Note: All [DataSource] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *DataSourcesPreviewAPI) GetByName(ctx context.Context, name string) (*DataSource, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.List(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]DataSource{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("DataSource named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of DataSource named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type DbsqlPermissionsPreviewInterface interface { + + // Get object ACL. 
+	//
+	// Gets a JSON representation of the access control list (ACL) for a specified
+	// object.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:workspace/getpermissions instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error)
+
+	// Get object ACL.
+	//
+	// Gets a JSON representation of the access control list (ACL) for a specified
+	// object.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:workspace/getpermissions instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error)
+
+	// Set object ACL.
+	//
+	// Sets the access control list (ACL) for a specified object. This operation
+	// will completely rewrite the ACL.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:workspace/setpermissions instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	Set(ctx context.Context, request SetRequest) (*SetResponse, error)
+
+	// Transfer object ownership.
+	//
+	// Transfers ownership of a dashboard, query, or alert to an active user.
+	// Requires an admin API key.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. For
+	// queries and alerts, please use :method:queries/update and
+	// :method:alerts/update respectively instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error)
+}
+
+func NewDbsqlPermissionsPreview(client *client.DatabricksClient) *DbsqlPermissionsPreviewAPI {
+	return &DbsqlPermissionsPreviewAPI{
+		dbsqlPermissionsPreviewImpl: dbsqlPermissionsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The SQL Permissions API is similar to :method:permissions/set, but exposes a
+// much smaller surface: reading the access control list (ACL) for a given
+// object, replacing it wholesale, and transferring object ownership.
+//
+// There are three levels of permission:
+//
+// - `CAN_VIEW`: Allows read-only access
+//
+// - `CAN_RUN`: Allows read access and run access (superset of `CAN_VIEW`)
+//
+// - `CAN_MANAGE`: Allows all actions: read, run, edit, delete, modify
+// permissions (superset of `CAN_RUN`)
+//
+// **Note**: A new version of the Databricks SQL API is now available. [Learn
+// more]
+//
+// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+type DbsqlPermissionsPreviewAPI struct {
+	dbsqlPermissionsPreviewImpl
+}
+
+// Get object ACL.
+//
+// Gets a JSON representation of the access control list (ACL) for a specified
+// object.
+//
+// **Note**: A new version of the Databricks SQL API is now available. Please
+// use :method:workspace/getpermissions instead.
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *DbsqlPermissionsPreviewAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error) { + return a.dbsqlPermissionsPreviewImpl.Get(ctx, GetDbsqlPermissionRequest{ + ObjectType: objectType, + ObjectId: objectId, + }) +} + +type QueriesLegacyPreviewInterface interface { + + // Create a new query definition. + // + // Creates a new query definition. Queries created with this endpoint belong to + // the authenticated user making the request. + // + // The `data_source_id` field specifies the ID of the SQL warehouse to run this + // query against. You can use the Data Sources API to see a complete list of + // available SQL warehouses. Or you can copy the `data_source_id` from an + // existing query. + // + // **Note**: You cannot add a visualization until you create the query. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/create instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error) + + // Delete a query. + // + // Moves a query to the trash. Trashed queries immediately disappear from + // searches and list views, and they cannot be used for alerts. The trash is + // deleted after 30 days. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error + + // Delete a query. + // + // Moves a query to the trash. Trashed queries immediately disappear from + // searches and list views, and they cannot be used for alerts. The trash is + // deleted after 30 days. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + DeleteByQueryId(ctx context.Context, queryId string) error + + // Get a query definition. + // + // Retrieve a query object definition along with contextual permissions + // information about the currently authenticated user. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/get instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error) + + // Get a query definition. + // + // Retrieve a query object definition along with contextual permissions + // information about the currently authenticated user. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/get instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error) + + // Get a list of queries. + // + // Gets a list of queries. Optionally, this list can be filtered by a search + // term. + // + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/list instead. 
[Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] + + // Get a list of queries. + // + // Gets a list of queries. Optionally, this list can be filtered by a search + // term. + // + // **Warning**: Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/list instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) + + // LegacyQueryNameToIdMap calls [QueriesLegacyPreviewAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. + // + // Returns an error if there's more than one [LegacyQuery] with the same .Name. + // + // Note: All [LegacyQuery] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error) + + // GetByName calls [QueriesLegacyPreviewAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. + // + // Returns an error if there's more than one [LegacyQuery] with the same .Name. + // + // Note: All [LegacyQuery] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*LegacyQuery, error) + + // Restore a query. + // + // Restore a query that has been moved to the trash. A restored query appears in + // list views and searches. You can use restored queries for alerts. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // see the latest version. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error + + // Change a query definition. + // + // Modify this query definition. + // + // **Note**: You cannot undo this operation. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:queries/update instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) +} + +func NewQueriesLegacyPreview(client *client.DatabricksClient) *QueriesLegacyPreviewAPI { + return &QueriesLegacyPreviewAPI{ + queriesLegacyPreviewImpl: queriesLegacyPreviewImpl{ + client: client, + }, + } +} + +// These endpoints are used for CRUD operations on query definitions. Query +// definitions include the target SQL warehouse, query text, name, description, +// tags, parameters, and visualizations. Queries can be scheduled using the +// `sql_task` type of the Jobs API, e.g. :method:jobs/create. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// see the latest version. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +type QueriesLegacyPreviewAPI struct { + queriesLegacyPreviewImpl +} + +// Delete a query. 
+// +// Moves a query to the trash. Trashed queries immediately disappear from +// searches and list views, and they cannot be used for alerts. The trash is +// deleted after 30 days. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/delete instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *QueriesLegacyPreviewAPI) DeleteByQueryId(ctx context.Context, queryId string) error { + return a.queriesLegacyPreviewImpl.Delete(ctx, DeleteQueriesLegacyRequest{ + QueryId: queryId, + }) +} + +// Get a query definition. +// +// Retrieve a query object definition along with contextual permissions +// information about the currently authenticated user. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/get instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *QueriesLegacyPreviewAPI) GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error) { + return a.queriesLegacyPreviewImpl.Get(ctx, GetQueriesLegacyRequest{ + QueryId: queryId, + }) +} + +// LegacyQueryNameToIdMap calls [QueriesLegacyPreviewAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. +// +// Returns an error if there's more than one [LegacyQuery] with the same .Name. +// +// Note: All [LegacyQuery] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *QueriesLegacyPreviewAPI) LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [QueriesLegacyPreviewAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. +// +// Returns an error if there's more than one [LegacyQuery] with the same .Name. +// +// Note: All [LegacyQuery] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *QueriesLegacyPreviewAPI) GetByName(ctx context.Context, name string) (*LegacyQuery, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListQueriesLegacyRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]LegacyQuery{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("LegacyQuery named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of LegacyQuery named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type QueriesPreviewInterface interface { + + // Create a query. + // + // Creates a query. + Create(ctx context.Context, request CreateQueryRequest) (*Query, error) + + // Delete a query. + // + // Moves a query to the trash. Trashed queries immediately disappear from + // searches and list views, and cannot be used for alerts. You can restore a + // trashed query through the UI. 
A trashed query is permanently deleted after 30 + // days. + Delete(ctx context.Context, request TrashQueryRequest) error + + // Delete a query. + // + // Moves a query to the trash. Trashed queries immediately disappear from + // searches and list views, and cannot be used for alerts. You can restore a + // trashed query through the UI. A trashed query is permanently deleted after 30 + // days. + DeleteById(ctx context.Context, id string) error + + // Get a query. + // + // Gets a query. + Get(ctx context.Context, request GetQueryRequest) (*Query, error) + + // Get a query. + // + // Gets a query. + GetById(ctx context.Context, id string) (*Query, error) + + // List queries. + // + // Gets a list of queries accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] + + // List queries. + // + // Gets a list of queries accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) + + // ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesPreviewAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. + // + // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. + // + // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) + + // GetByDisplayName calls [QueriesPreviewAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. + // + // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. + // + // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + // + // This method is generated by Databricks SDK Code Generator. + ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + // + // This method is generated by Databricks SDK Code Generator. + ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) + + // Update a query. + // + // Updates a query. 
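+	//
+	// A hedged sketch, given `qs` satisfying this interface; the
+	// UpdateQueryRequest fields (Id, UpdateMask) are assumed from model.go
+	// and the values are illustrative:
+	//
+	//	q, err := qs.Update(ctx, UpdateQueryRequest{
+	//		Id:         "12345678-1234-1234-1234-123456789012",
+	//		UpdateMask: "display_name",
+	//	})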
+ Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) +} + +func NewQueriesPreview(client *client.DatabricksClient) *QueriesPreviewAPI { + return &QueriesPreviewAPI{ + queriesPreviewImpl: queriesPreviewImpl{ + client: client, + }, + } +} + +// The queries API can be used to perform CRUD operations on queries. A query is +// a Databricks SQL object that includes the target SQL warehouse, query text, +// name, description, tags, and parameters. Queries can be scheduled using the +// `sql_task` type of the Jobs API, e.g. :method:jobs/create. +type QueriesPreviewAPI struct { + queriesPreviewImpl +} + +// Delete a query. +// +// Moves a query to the trash. Trashed queries immediately disappear from +// searches and list views, and cannot be used for alerts. You can restore a +// trashed query through the UI. A trashed query is permanently deleted after 30 +// days. +func (a *QueriesPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.queriesPreviewImpl.Delete(ctx, TrashQueryRequest{ + Id: id, + }) +} + +// Get a query. +// +// Gets a query. +func (a *QueriesPreviewAPI) GetById(ctx context.Context, id string) (*Query, error) { + return a.queriesPreviewImpl.Get(ctx, GetQueryRequest{ + Id: id, + }) +} + +// ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesPreviewAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. +// +// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. +// +// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *QueriesPreviewAPI) ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [QueriesPreviewAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. +// +// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. +// +// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
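+//
+// A hedged usage sketch; the display name is illustrative, and the call
+// returns an error when zero or several queries share that name:
+//
+//	q, err := a.GetByDisplayName(ctx, "Daily revenue")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(q.Id)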
+func (a *QueriesPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) {
+	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
+	result, err := a.ListAll(ctx, ListQueriesRequest{})
+	if err != nil {
+		return nil, err
+	}
+	tmp := map[string][]ListQueryObjectsResponseQuery{}
+	for _, v := range result {
+		key := v.DisplayName
+		tmp[key] = append(tmp[key], v)
+	}
+	alternatives, ok := tmp[name]
+	if !ok || len(alternatives) == 0 {
+		return nil, fmt.Errorf("ListQueryObjectsResponseQuery named '%s' does not exist", name)
+	}
+	if len(alternatives) > 1 {
+		return nil, fmt.Errorf("there are %d instances of ListQueryObjectsResponseQuery named '%s'", len(alternatives), name)
+	}
+	return &alternatives[0], nil
+}
+
+// List visualizations on a query.
+//
+// Gets a list of visualizations on a query.
+func (a *QueriesPreviewAPI) ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) {
+	return a.queriesPreviewImpl.internalListVisualizations(ctx, ListVisualizationsForQueryRequest{
+		Id: id,
+	})
+}
+
+type QueryHistoryPreviewInterface interface {
+
+	// List queries.
+	//
+	// List the history of queries through SQL warehouses and serverless compute.
+	//
+	// You can filter by user ID, warehouse ID, status, and time range. Most
+	// recently started queries are returned first (up to max_results in request).
+	// The pagination token returned in the response can be used to list subsequent
+	// query statuses.
+	List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error)
+}
+
+func NewQueryHistoryPreview(client *client.DatabricksClient) *QueryHistoryPreviewAPI {
+	return &QueryHistoryPreviewAPI{
+		queryHistoryPreviewImpl: queryHistoryPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// A service responsible for storing and retrieving the list of queries run
+// against SQL endpoints and serverless compute.
+type QueryHistoryPreviewAPI struct {
+	queryHistoryPreviewImpl
+}
+
+type QueryVisualizationsLegacyPreviewInterface interface {
+
+	// Add visualization to a query.
+	//
+	// Creates a visualization in the query.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:queryvisualizations/create instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error)
+
+	// Remove visualization.
+	//
+	// Removes a visualization from the query.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:queryvisualizations/delete instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error
+
+	// Remove visualization.
+	//
+	// Removes a visualization from the query.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:queryvisualizations/delete instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	DeleteById(ctx context.Context, id string) error
+
+	// Edit existing visualization.
+	//
+	// Updates a visualization in the query.
+	//
+	// **Note**: A new version of the Databricks SQL API is now available. Please
+	// use :method:queryvisualizations/update instead. [Learn more]
+	//
+	// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+	Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error)
+}
+
+func NewQueryVisualizationsLegacyPreview(client *client.DatabricksClient) *QueryVisualizationsLegacyPreviewAPI {
+	return &QueryVisualizationsLegacyPreviewAPI{
+		queryVisualizationsLegacyPreviewImpl: queryVisualizationsLegacyPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// This is an evolving API that facilitates the addition and removal of
+// visualizations from existing queries within the Databricks Workspace. Data
+// structures may change over time.
+//
+// **Note**: A new version of the Databricks SQL API is now available. Please
+// see the latest version. [Learn more]
+//
+// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+type QueryVisualizationsLegacyPreviewAPI struct {
+	queryVisualizationsLegacyPreviewImpl
+}
+
+// Remove visualization.
+//
+// Removes a visualization from the query.
+//
+// **Note**: A new version of the Databricks SQL API is now available. Please
+// use :method:queryvisualizations/delete instead. [Learn more]
+//
+// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html
+func (a *QueryVisualizationsLegacyPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.queryVisualizationsLegacyPreviewImpl.Delete(ctx, DeleteQueryVisualizationsLegacyRequest{
+		Id: id,
+	})
+}
+
+type QueryVisualizationsPreviewInterface interface {
+
+	// Add a visualization to a query.
+	//
+	// Adds a visualization to a query.
+	Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error)
+
+	// Remove a visualization.
+	//
+	// Removes a visualization.
+	Delete(ctx context.Context, request DeleteVisualizationRequest) error
+
+	// Remove a visualization.
+	//
+	// Removes a visualization.
+	DeleteById(ctx context.Context, id string) error
+
+	// Update a visualization.
+	//
+	// Updates a visualization.
+	Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error)
+}
+
+func NewQueryVisualizationsPreview(client *client.DatabricksClient) *QueryVisualizationsPreviewAPI {
+	return &QueryVisualizationsPreviewAPI{
+		queryVisualizationsPreviewImpl: queryVisualizationsPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// This is an evolving API that facilitates the addition and removal of
+// visualizations from existing queries in the Databricks Workspace. Data
+// structures can change over time.
+type QueryVisualizationsPreviewAPI struct {
+	queryVisualizationsPreviewImpl
+}
+
+// Remove a visualization.
+//
+// Removes a visualization.
+func (a *QueryVisualizationsPreviewAPI) DeleteById(ctx context.Context, id string) error {
+	return a.queryVisualizationsPreviewImpl.Delete(ctx, DeleteVisualizationRequest{
+		Id: id,
+	})
+}
+
+type RedashConfigPreviewInterface interface {
+
+	// Read workspace configuration for Redash-v2.
+	GetConfig(ctx context.Context) (*ClientConfig, error)
+}
+
+func NewRedashConfigPreview(client *client.DatabricksClient) *RedashConfigPreviewAPI {
+	return &RedashConfigPreviewAPI{
+		redashConfigPreviewImpl: redashConfigPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// Redash V2 service for workspace configurations (internal)
+type RedashConfigPreviewAPI struct {
+	redashConfigPreviewImpl
+}
+
+type StatementExecutionPreviewInterface interface {
+
+	// Cancel statement execution.
+	//
+	// Requests that an executing statement be canceled.
Callers must poll for + // status to see the terminal state. + CancelExecution(ctx context.Context, request CancelExecutionRequest) error + + // Execute a SQL statement. + ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) + + // Get status, manifest, and result first chunk. + // + // This request can be used to poll for the statement's status. When the + // `status.state` field is `SUCCEEDED` it will also return the result manifest + // and the first chunk of the result data. When the statement is in the terminal + // states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state + // set. After at least 12 hours in terminal state, the statement is removed from + // the warehouse and further calls will receive an HTTP 404 response. + // + // **NOTE** This call currently might take up to 5 seconds to get the latest + // status and result. + GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error) + + // Get status, manifest, and result first chunk. + // + // This request can be used to poll for the statement's status. When the + // `status.state` field is `SUCCEEDED` it will also return the result manifest + // and the first chunk of the result data. When the statement is in the terminal + // states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state + // set. After at least 12 hours in terminal state, the statement is removed from + // the warehouse and further calls will receive an HTTP 404 response. + // + // **NOTE** This call currently might take up to 5 seconds to get the latest + // status and result. + GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error) + + // Get result chunk by index. + // + // After the statement execution has `SUCCEEDED`, this request can be used to + // fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is + // typically fetched with :method:statementexecution/executeStatement or + // :method:statementexecution/getStatement, this request can be used to fetch + // subsequent chunks. The response structure is identical to the nested `result` + // element described in the :method:statementexecution/getStatement request, and + // similarly includes the `next_chunk_index` and `next_chunk_internal_link` + // fields for simple iteration through the result set. + GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error) + + // Get result chunk by index. + // + // After the statement execution has `SUCCEEDED`, this request can be used to + // fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is + // typically fetched with :method:statementexecution/executeStatement or + // :method:statementexecution/getStatement, this request can be used to fetch + // subsequent chunks. The response structure is identical to the nested `result` + // element described in the :method:statementexecution/getStatement request, and + // similarly includes the `next_chunk_index` and `next_chunk_internal_link` + // fields for simple iteration through the result set. 
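+	//
+	// A hedged end-to-end sketch, given `se` satisfying this interface:
+	// execute, then fetch the remaining chunks by index (chunk 0 arrives
+	// inline with the response). The request fields, StatementId, and
+	// Manifest.TotalChunkCount are assumed from model.go, and the sketch
+	// assumes the statement finished within the default wait window:
+	//
+	//	resp, err := se.ExecuteStatement(ctx, ExecuteStatementRequest{
+	//		WarehouseId: warehouseId,
+	//		Statement:   "SELECT 1",
+	//	})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	for i := 1; i < int(resp.Manifest.TotalChunkCount); i++ {
+	//		chunk, err := se.GetStatementResultChunkNByStatementIdAndChunkIndex(ctx, resp.StatementId, i)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		_ = chunk // decode this chunk's rows here
+	//	}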
+	GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error)
+}
+
+func NewStatementExecutionPreview(client *client.DatabricksClient) *StatementExecutionPreviewAPI {
+	return &StatementExecutionPreviewAPI{
+		statementExecutionPreviewImpl: statementExecutionPreviewImpl{
+			client: client,
+		},
+	}
+}
+
+// The Databricks SQL Statement Execution API can be used to execute SQL
+// statements on a SQL warehouse and fetch the result.
+//
+// **Getting started**
+//
+// We suggest beginning with the [Databricks SQL Statement Execution API
+// tutorial].
+//
+// **Overview of statement execution and result fetching**
+//
+// Statement execution begins by issuing a
+// :method:statementexecution/executeStatement request with a valid SQL
+// statement and warehouse ID, along with optional parameters such as the data
+// catalog and output format. If no other parameters are specified, the server
+// will wait for up to 10s before returning a response. If the statement has
+// completed within this timespan, the response will include the result data as
+// a JSON array and metadata. Otherwise, if no result is available once the 10s
+// timeout has expired, the response will provide the statement ID that can be
+// used to poll for results by using a :method:statementexecution/getStatement
+// request.
+//
+// You can specify whether the call should behave synchronously, asynchronously,
+// or start synchronously with a fallback to asynchronous execution. This is
+// controlled with the `wait_timeout` and `on_wait_timeout` settings. If
+// `wait_timeout` is set between 5 and 50 seconds (default: 10s), the call waits
+// for results up to the specified timeout; when set to `0s`, the call is
+// asynchronous and responds immediately with a statement ID. The
+// `on_wait_timeout` setting specifies what should happen when the timeout is
+// reached while the statement execution has not yet finished. This can be set
+// to either `CONTINUE`, to fall back to asynchronous mode, or it can be set to
+// `CANCEL`, which cancels the statement.
+//
+// In summary: - Synchronous mode - `wait_timeout=30s` and
+// `on_wait_timeout=CANCEL` - The call waits up to 30 seconds; if the statement
+// execution finishes within this time, the result data is returned directly in
+// the response. If the execution takes longer than 30 seconds, the execution is
+// canceled and the call returns with a `CANCELED` state. - Asynchronous mode -
+// `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for
+// the statement to finish but returns directly with a statement ID. The status
+// of the statement execution can be polled by issuing
+// :method:statementexecution/getStatement with the statement ID. Once the
+// execution has succeeded, this call also returns the result and metadata in
+// the response. - Hybrid mode (default) - `wait_timeout=10s` and
+// `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the
+// statement execution finishes within this time, the result data is returned
+// directly in the response. If the execution takes longer than 10 seconds, a
+// statement ID is returned. The statement ID can be used to fetch status and
+// results in the same way as in the asynchronous mode.
+//
+// Depending on the size, the result can be split into multiple chunks. If the
+// statement execution is successful, the statement response contains a manifest
+// and the first chunk of the result.
The manifest contains schema information +// and provides metadata for each chunk in the result. Result chunks can be +// retrieved by index with :method:statementexecution/getStatementResultChunkN +// which may be called in any order and in parallel. For sequential fetching, +// each chunk, apart from the last, also contains a `next_chunk_index` and +// `next_chunk_internal_link` that point to the next chunk. +// +// A statement can be canceled with :method:statementexecution/cancelExecution. +// +// **Fetching result data: format and disposition** +// +// To specify the format of the result data, use the `format` field, which can +// be set to one of the following options: `JSON_ARRAY` (JSON), `ARROW_STREAM` +// ([Apache Arrow Columnar]), or `CSV`. +// +// There are two ways to receive statement results, controlled by the +// `disposition` setting, which can be either `INLINE` or `EXTERNAL_LINKS`: +// +// - `INLINE`: In this mode, the result data is directly included in the +// response. It's best suited for smaller results. This mode can only be used +// with the `JSON_ARRAY` format. +// +// - `EXTERNAL_LINKS`: In this mode, the response provides links that can be +// used to download the result data in chunks separately. This approach is ideal +// for larger results and offers higher throughput. This mode can be used with +// all the formats: `JSON_ARRAY`, `ARROW_STREAM`, and `CSV`. +// +// By default, the API uses `format=JSON_ARRAY` and `disposition=INLINE`. +// +// **Limits and limitations** +// +// Note: The byte limit for INLINE disposition is based on internal storage +// metrics and will not exactly match the byte count of the actual payload. +// +// - Statements with `disposition=INLINE` are limited to 25 MiB and will fail +// when this limit is exceeded. - Statements with `disposition=EXTERNAL_LINKS` +// are limited to 100 GiB. Result sets larger than this limit will be truncated. +// Truncation is indicated by the `truncated` field in the result manifest. - +// The maximum query text size is 16 MiB. - Cancelation might silently fail. A +// successful response from a cancel request indicates that the cancel request +// was successfully received and sent to the processing engine. However, an +// outstanding statement might have already completed execution when the cancel +// request arrives. Polling for status until a terminal state is reached is a +// reliable way to determine the final state. - Wait timeouts are approximate, +// occur server-side, and cannot account for things such as caller delays and +// network latency from caller to service. - To guarantee that the statement is +// kept alive, you must poll at least once every 15 minutes. - The results are +// only available for one hour after success; polling does not extend this. - +// The SQL Execution API must be used for the entire lifecycle of the statement. +// For example, you cannot use the Jobs API to execute the command, and then the +// SQL Execution API to cancel it. +// +// [Apache Arrow Columnar]: https://arrow.apache.org/overview/ +// [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html +type StatementExecutionPreviewAPI struct { + statementExecutionPreviewImpl +} + +// Get status, manifest, and result first chunk. +// +// This request can be used to poll for the statement's status. When the +// `status.state` field is `SUCCEEDED` it will also return the result manifest +// and the first chunk of the result data. 
When the statement is in the terminal +// states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP 200 with the state +// set. After at least 12 hours in terminal state, the statement is removed from +// the warehouse and further calls will receive an HTTP 404 response. +// +// **NOTE** This call currently might take up to 5 seconds to get the latest +// status and result. +func (a *StatementExecutionPreviewAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error) { + return a.statementExecutionPreviewImpl.GetStatement(ctx, GetStatementRequest{ + StatementId: statementId, + }) +} + +// Get result chunk by index. +// +// After the statement execution has `SUCCEEDED`, this request can be used to +// fetch any chunk by index. Whereas the first chunk with `chunk_index=0` is +// typically fetched with :method:statementexecution/executeStatement or +// :method:statementexecution/getStatement, this request can be used to fetch +// subsequent chunks. The response structure is identical to the nested `result` +// element described in the :method:statementexecution/getStatement request, and +// similarly includes the `next_chunk_index` and `next_chunk_internal_link` +// fields for simple iteration through the result set. +func (a *StatementExecutionPreviewAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error) { + return a.statementExecutionPreviewImpl.GetStatementResultChunkN(ctx, GetStatementResultChunkNRequest{ + StatementId: statementId, + ChunkIndex: chunkIndex, + }) +} + +type WarehousesPreviewInterface interface { + + // Create a warehouse. + // + // Creates a new SQL warehouse. + Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error) + + // Delete a warehouse. + // + // Deletes a SQL warehouse. + Delete(ctx context.Context, request DeleteWarehouseRequest) error + + // Delete a warehouse. + // + // Deletes a SQL warehouse. + DeleteById(ctx context.Context, id string) error + + // Update a warehouse. + // + // Updates the configuration for a SQL warehouse. + Edit(ctx context.Context, request EditWarehouseRequest) error + + // Get warehouse info. + // + // Gets the information for a single SQL warehouse. + Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error) + + // Get warehouse info. + // + // Gets the information for a single SQL warehouse. + GetById(ctx context.Context, id string) (*GetWarehouseResponse, error) + + // Get SQL warehouse permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error) + + // Get SQL warehouse permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error) + + // Get SQL warehouse permissions. + // + // Gets the permissions of a SQL warehouse. SQL warehouses can inherit + // permissions from their root object. + GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error) + + // Get SQL warehouse permissions. + // + // Gets the permissions of a SQL warehouse. SQL warehouses can inherit + // permissions from their root object. 
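+	//
+	// A hedged usage sketch, given `ws` satisfying this interface; the
+	// warehouse ID is illustrative and the AccessControlList field name is
+	// assumed from model.go:
+	//
+	//	perms, err := ws.GetPermissionsByWarehouseId(ctx, "1234567890abcdef")
+	//	if err != nil {
+	//		return err
+	//	}
+	//	for _, acl := range perms.AccessControlList {
+	//		fmt.Printf("%+v\n", acl)
+	//	}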
+ GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error) + + // Get the workspace configuration. + // + // Gets the workspace level configuration that is shared by all SQL warehouses + // in a workspace. + GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error) + + // List warehouses. + // + // Lists all SQL warehouses that a user has manager permissions on. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] + + // List warehouses. + // + // Lists all SQL warehouses that a user has manager permissions on. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) + + // EndpointInfoNameToIdMap calls [WarehousesPreviewAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. + // + // Returns an error if there's more than one [EndpointInfo] with the same .Name. + // + // Note: All [EndpointInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error) + + // GetByName calls [WarehousesPreviewAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. + // + // Returns an error if there's more than one [EndpointInfo] with the same .Name. + // + // Note: All [EndpointInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByName(ctx context.Context, name string) (*EndpointInfo, error) + + // Set SQL warehouse permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) + + // Set the workspace configuration. + // + // Sets the workspace level configuration that is shared by all SQL warehouses + // in a workspace. + SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error + + // Start a warehouse. + // + // Starts a SQL warehouse. + Start(ctx context.Context, request StartRequest) error + + // Stop a warehouse. + // + // Stops a SQL warehouse. + Stop(ctx context.Context, request StopRequest) error + + // Update SQL warehouse permissions. + // + // Updates the permissions on a SQL warehouse. SQL warehouses can inherit + // permissions from their root object. + UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) +} + +func NewWarehousesPreview(client *client.DatabricksClient) *WarehousesPreviewAPI { + return &WarehousesPreviewAPI{ + warehousesPreviewImpl: warehousesPreviewImpl{ + client: client, + }, + } +} + +// A SQL warehouse is a compute resource that lets you run SQL commands on data +// objects within Databricks SQL. Compute resources are infrastructure resources +// that provide processing capabilities in the cloud. +type WarehousesPreviewAPI struct { + warehousesPreviewImpl +} + +// Delete a warehouse. +// +// Deletes a SQL warehouse. 
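+//
+// A hedged sketch pairing the GetByName lookup declared above with deletion;
+// the warehouse name is illustrative:
+//
+//	wh, err := a.GetByName(ctx, "Scratch warehouse")
+//	if err != nil {
+//		return err
+//	}
+//	if err := a.DeleteById(ctx, wh.Id); err != nil {
+//		return err
+//	}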
+func (a *WarehousesPreviewAPI) DeleteById(ctx context.Context, id string) error { + return a.warehousesPreviewImpl.Delete(ctx, DeleteWarehouseRequest{ + Id: id, + }) +} + +// Get warehouse info. +// +// Gets the information for a single SQL warehouse. +func (a *WarehousesPreviewAPI) GetById(ctx context.Context, id string) (*GetWarehouseResponse, error) { + return a.warehousesPreviewImpl.Get(ctx, GetWarehouseRequest{ + Id: id, + }) +} + +// Get SQL warehouse permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *WarehousesPreviewAPI) GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error) { + return a.warehousesPreviewImpl.GetPermissionLevels(ctx, GetWarehousePermissionLevelsRequest{ + WarehouseId: warehouseId, + }) +} + +// Get SQL warehouse permissions. +// +// Gets the permissions of a SQL warehouse. SQL warehouses can inherit +// permissions from their root object. +func (a *WarehousesPreviewAPI) GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error) { + return a.warehousesPreviewImpl.GetPermissions(ctx, GetWarehousePermissionsRequest{ + WarehouseId: warehouseId, + }) +} + +// EndpointInfoNameToIdMap calls [WarehousesPreviewAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. +// +// Returns an error if there's more than one [EndpointInfo] with the same .Name. +// +// Note: All [EndpointInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *WarehousesPreviewAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Name + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Name: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByName calls [WarehousesPreviewAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. +// +// Returns an error if there's more than one [EndpointInfo] with the same .Name. +// +// Note: All [EndpointInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *WarehousesPreviewAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListWarehousesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]EndpointInfo{} + for _, v := range result { + key := v.Name + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("EndpointInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of EndpointInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} diff --git a/sql/v2preview/client.go b/sql/v2preview/client.go new file mode 100755 index 000000000..ffc4e68df --- /dev/null +++ b/sql/v2preview/client.go @@ -0,0 +1,487 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
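[Editor's note — usage sketch, not part of the generated patch.] The statement-execution methods documented in api.go above compose into a simple execute/poll/fetch loop. In the sketch below, the import path is inferred from this patch's directory layout, the warehouse ID is a placeholder, and the `ExecuteStatementRequest` fields, `Status.State`, the `StatementState*` constants, and the `ResultData` field names mirror the non-preview sql package, so treat them as assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	sqlpreview "github.com/databricks/databricks-sdk-go/sql/v2preview"
)

func main() {
	ctx := context.Background()
	c, err := sqlpreview.NewStatementExecutionPreviewClient(nil) // nil falls back to default config resolution
	if err != nil {
		log.Fatal(err)
	}
	st, err := c.ExecuteStatement(ctx, sqlpreview.ExecuteStatementRequest{
		WarehouseId: "1234567890abcdef", // placeholder warehouse ID
		Statement:   "SELECT 1 AS n",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll until the statement reaches a terminal state (see the GetStatement docs above).
	for st.Status != nil && (st.Status.State == sqlpreview.StatementStatePending ||
		st.Status.State == sqlpreview.StatementStateRunning) {
		time.Sleep(2 * time.Second)
		if st, err = c.GetStatementByStatementId(ctx, st.StatementId); err != nil {
			log.Fatal(err)
		}
	}
	// Fetch result chunks by index, following next_chunk_index until
	// next_chunk_internal_link is no longer set.
	for idx := 0; ; {
		chunk, err := c.GetStatementResultChunkNByStatementIdAndChunkIndex(ctx, st.StatementId, idx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("chunk %d: %d rows\n", idx, len(chunk.DataArray))
		if chunk.NextChunkInternalLink == "" {
			break
		}
		idx = chunk.NextChunkIndex
	}
}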
+ +package sqlpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type AlertsLegacyPreviewClient struct { + AlertsLegacyPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAlertsLegacyPreviewClient(cfg *config.Config) (*AlertsLegacyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AlertsLegacyPreviewClient{ + Config: cfg, + apiClient: apiClient, + AlertsLegacyPreviewInterface: NewAlertsLegacyPreview(databricksClient), + }, nil +} + +type AlertsPreviewClient struct { + AlertsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewAlertsPreviewClient(cfg *config.Config) (*AlertsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &AlertsPreviewClient{ + Config: cfg, + apiClient: apiClient, + AlertsPreviewInterface: NewAlertsPreview(databricksClient), + }, nil +} + +type DashboardWidgetsPreviewClient struct { + DashboardWidgetsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDashboardWidgetsPreviewClient(cfg *config.Config) (*DashboardWidgetsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DashboardWidgetsPreviewClient{ + Config: cfg, + apiClient: apiClient, + DashboardWidgetsPreviewInterface: NewDashboardWidgetsPreview(databricksClient), + }, nil +} + +type DashboardsPreviewClient struct { + DashboardsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDashboardsPreviewClient(cfg *config.Config) (*DashboardsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DashboardsPreviewClient{ + Config: 
cfg, + apiClient: apiClient, + DashboardsPreviewInterface: NewDashboardsPreview(databricksClient), + }, nil +} + +type DataSourcesPreviewClient struct { + DataSourcesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDataSourcesPreviewClient(cfg *config.Config) (*DataSourcesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DataSourcesPreviewClient{ + Config: cfg, + apiClient: apiClient, + DataSourcesPreviewInterface: NewDataSourcesPreview(databricksClient), + }, nil +} + +type DbsqlPermissionsPreviewClient struct { + DbsqlPermissionsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewDbsqlPermissionsPreviewClient(cfg *config.Config) (*DbsqlPermissionsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &DbsqlPermissionsPreviewClient{ + Config: cfg, + apiClient: apiClient, + DbsqlPermissionsPreviewInterface: NewDbsqlPermissionsPreview(databricksClient), + }, nil +} + +type QueriesLegacyPreviewClient struct { + QueriesLegacyPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueriesLegacyPreviewClient(cfg *config.Config) (*QueriesLegacyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueriesLegacyPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueriesLegacyPreviewInterface: NewQueriesLegacyPreview(databricksClient), + }, nil +} + +type QueriesPreviewClient struct { + QueriesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueriesPreviewClient(cfg *config.Config) (*QueriesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueriesPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueriesPreviewInterface: NewQueriesPreview(databricksClient), + }, nil 
+} + +type QueryHistoryPreviewClient struct { + QueryHistoryPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueryHistoryPreviewClient(cfg *config.Config) (*QueryHistoryPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueryHistoryPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueryHistoryPreviewInterface: NewQueryHistoryPreview(databricksClient), + }, nil +} + +type QueryVisualizationsLegacyPreviewClient struct { + QueryVisualizationsLegacyPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueryVisualizationsLegacyPreviewClient(cfg *config.Config) (*QueryVisualizationsLegacyPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueryVisualizationsLegacyPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueryVisualizationsLegacyPreviewInterface: NewQueryVisualizationsLegacyPreview(databricksClient), + }, nil +} + +type QueryVisualizationsPreviewClient struct { + QueryVisualizationsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewQueryVisualizationsPreviewClient(cfg *config.Config) (*QueryVisualizationsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &QueryVisualizationsPreviewClient{ + Config: cfg, + apiClient: apiClient, + QueryVisualizationsPreviewInterface: NewQueryVisualizationsPreview(databricksClient), + }, nil +} + +type RedashConfigPreviewClient struct { + RedashConfigPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewRedashConfigPreviewClient(cfg *config.Config) (*RedashConfigPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &RedashConfigPreviewClient{ + Config: cfg, + apiClient: apiClient, + RedashConfigPreviewInterface: 
NewRedashConfigPreview(databricksClient), + }, nil +} + +type StatementExecutionPreviewClient struct { + StatementExecutionPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewStatementExecutionPreviewClient(cfg *config.Config) (*StatementExecutionPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &StatementExecutionPreviewClient{ + Config: cfg, + apiClient: apiClient, + StatementExecutionPreviewInterface: NewStatementExecutionPreview(databricksClient), + }, nil +} + +type WarehousesPreviewClient struct { + WarehousesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewWarehousesPreviewClient(cfg *config.Config) (*WarehousesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &WarehousesPreviewClient{ + Config: cfg, + apiClient: apiClient, + WarehousesPreviewInterface: NewWarehousesPreview(databricksClient), + }, nil +} diff --git a/sql/v2preview/impl.go b/sql/v2preview/impl.go new file mode 100755 index 000000000..5b7198c3e --- /dev/null +++ b/sql/v2preview/impl.go @@ -0,0 +1,952 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
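[Editor's note — usage sketch, not part of the generated patch.] Every constructor in client.go above follows the same shape: resolve the config, reject account-level configs for these workspace services, then build an ApiClient and a DatabricksClient. Constructing one with an explicit config and resolving a warehouse by display name might look like this (host, token, and warehouse name are placeholders):

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	sqlpreview "github.com/databricks/databricks-sdk-go/sql/v2preview"
)

func main() {
	cfg := &config.Config{
		Host:  "https://my-workspace.cloud.databricks.com", // placeholder workspace URL
		Token: "dapi-placeholder-token",                    // placeholder personal access token
	}
	w, err := sqlpreview.NewWarehousesPreviewClient(cfg)
	if err != nil {
		log.Fatal(err) // e.g. an account-level config fails the IsAccountClient guard
	}
	// GetByName lists every warehouse and errors when the name is missing or ambiguous.
	wh, err := w.GetByName(context.Background(), "Shared Endpoint") // placeholder name
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("warehouse %q has id %s", wh.Name, wh.Id)
}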
+ +package sqlpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just AlertsLegacyPreview API methods +type alertsLegacyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *alertsLegacyPreviewImpl) Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) { + var legacyAlert LegacyAlert + path := "/api/2.0preview/preview/sql/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyAlert) + return &legacyAlert, err +} + +func (a *alertsLegacyPreviewImpl) Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *alertsLegacyPreviewImpl) Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) { + var legacyAlert LegacyAlert + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyAlert) + return &legacyAlert, err +} + +func (a *alertsLegacyPreviewImpl) List(ctx context.Context) ([]LegacyAlert, error) { + var legacyAlertList []LegacyAlert + path := "/api/2.0preview/preview/sql/alerts" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &legacyAlertList) + return legacyAlertList, err +} + +func (a *alertsLegacyPreviewImpl) Update(ctx context.Context, request EditAlert) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just AlertsPreview API methods +type alertsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *alertsPreviewImpl) Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) { + var alert Alert + path := "/api/2.0preview/sql/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &alert) + return &alert, err +} + +func (a *alertsPreviewImpl) Delete(ctx context.Context, request TrashAlertRequest) error { + var empty Empty + path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = 
"application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *alertsPreviewImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { + var alert Alert + path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &alert) + return &alert, err +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *alertsPreviewImpl) List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] { + + getNextPage := func(ctx context.Context, req ListAlertsRequest) (*ListAlertsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAlertsResponse) []ListAlertsResponseAlert { + return resp.Results + } + getNextReq := func(resp *ListAlertsResponse) *ListAlertsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *alertsPreviewImpl) ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListAlertsResponseAlert](ctx, iterator) +} +func (a *alertsPreviewImpl) internalList(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { + var listAlertsResponse ListAlertsResponse + path := "/api/2.0preview/sql/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAlertsResponse) + return &listAlertsResponse, err +} + +func (a *alertsPreviewImpl) Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) { + var alert Alert + path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &alert) + return &alert, err +} + +// unexported type that holds implementations of just DashboardWidgetsPreview API methods +type dashboardWidgetsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *dashboardWidgetsPreviewImpl) Create(ctx context.Context, request CreateWidget) (*Widget, error) { + var widget Widget + path := "/api/2.0preview/preview/sql/widgets" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &widget) + return &widget, err +} + +func (a *dashboardWidgetsPreviewImpl) Delete(ctx 
context.Context, request DeleteDashboardWidgetRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/widgets/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *dashboardWidgetsPreviewImpl) Update(ctx context.Context, request CreateWidget) (*Widget, error) { + var widget Widget + path := fmt.Sprintf("/api/2.0preview/preview/sql/widgets/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &widget) + return &widget, err +} + +// unexported type that holds implementations of just DashboardsPreview API methods +type dashboardsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *dashboardsPreviewImpl) Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) { + var dashboard Dashboard + path := "/api/2.0preview/preview/sql/dashboards" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) + return &dashboard, err +} + +func (a *dashboardsPreviewImpl) Delete(ctx context.Context, request DeleteDashboardRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *dashboardsPreviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { + var dashboard Dashboard + path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &dashboard) + return &dashboard, err +} + +// Get dashboard objects. +// +// Fetch a paginated list of dashboard objects. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. 
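// [Editor's note — consumption sketch, not generated code.] The dashboards
// List below paginates by page number and deduplicates by dashboard ID, since
// page-based listings can repeat items across page boundaries. Iterating it
// relies only on the listing iterator's HasNext/Next contract; here
// `dashboards` stands for a DashboardsPreviewInterface, and the Name field is
// assumed from the model:
//
//	it := dashboards.List(ctx, ListDashboardsRequest{PageSize: 50})
//	for it.HasNext(ctx) {
//		d, err := it.Next(ctx)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(d.Id, d.Name)
//	}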
+func (a *dashboardsPreviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { + + request.Page = 1 // start iterating from the first page + + getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListResponse) []Dashboard { + return resp.Results + } + getNextReq := func(resp *ListResponse) *ListDashboardsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.Page = resp.Page + 1 + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Dashboard, string]( + iterator, + func(item Dashboard) string { + return item.Id + }) + return dedupedIterator +} + +// Get dashboard objects. +// +// Fetch a paginated list of dashboard objects. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *dashboardsPreviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Dashboard, int](ctx, iterator, request.PageSize) + +} +func (a *dashboardsPreviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { + var listResponse ListResponse + path := "/api/2.0preview/preview/sql/dashboards" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listResponse) + return &listResponse, err +} + +func (a *dashboardsPreviewImpl) Restore(ctx context.Context, request RestoreDashboardRequest) error { + var restoreResponse RestoreResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/trash/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) + return err +} + +func (a *dashboardsPreviewImpl) Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error) { + var dashboard Dashboard + path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) + return &dashboard, err +} + +// unexported type that holds implementations of just DataSourcesPreview API methods +type dataSourcesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *dataSourcesPreviewImpl) List(ctx context.Context) ([]DataSource, error) { + var dataSourceList []DataSource + path := "/api/2.0preview/preview/sql/data_sources" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &dataSourceList) + return dataSourceList, err +} + +// unexported type that holds implementations of just DbsqlPermissionsPreview API methods +type dbsqlPermissionsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *dbsqlPermissionsPreviewImpl) Get(ctx context.Context, request GetDbsqlPermissionRequest) 
(*GetResponse, error) { + var getResponse GetResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getResponse) + return &getResponse, err +} + +func (a *dbsqlPermissionsPreviewImpl) Set(ctx context.Context, request SetRequest) (*SetResponse, error) { + var setResponse SetResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setResponse) + return &setResponse, err +} + +func (a *dbsqlPermissionsPreviewImpl) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) { + var success Success + path := fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v/transfer", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &success) + return &success, err +} + +// unexported type that holds implementations of just QueriesLegacyPreview API methods +type queriesLegacyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queriesLegacyPreviewImpl) Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := "/api/2.0preview/preview/sql/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err +} + +func (a *queriesLegacyPreviewImpl) Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *queriesLegacyPreviewImpl) Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err +} + +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. 
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyPreviewImpl) List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] { + + request.Page = 1 // start iterating from the first page + + getNextPage := func(ctx context.Context, req ListQueriesLegacyRequest) (*QueryList, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *QueryList) []LegacyQuery { + return resp.Results + } + getNextReq := func(resp *QueryList) *ListQueriesLegacyRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.Page = resp.Page + 1 + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[LegacyQuery, string]( + iterator, + func(item LegacyQuery) string { + return item.Id + }) + return dedupedIterator +} + +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyPreviewImpl) ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[LegacyQuery, int](ctx, iterator, request.PageSize) + +} +func (a *queriesLegacyPreviewImpl) internalList(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { + var queryList QueryList + path := "/api/2.0preview/preview/sql/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &queryList) + return &queryList, err +} + +func (a *queriesLegacyPreviewImpl) Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error { + var restoreResponse RestoreResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/trash/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) + return err +} + +func (a *queriesLegacyPreviewImpl) Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err +} + +// unexported type that holds implementations of just QueriesPreview API methods +type queriesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queriesPreviewImpl) Create(ctx context.Context, request CreateQueryRequest) (*Query, error) { + var query Query + path := "/api/2.0preview/sql/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + 
headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &query) + return &query, err +} + +func (a *queriesPreviewImpl) Delete(ctx context.Context, request TrashQueryRequest) error { + var empty Empty + path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *queriesPreviewImpl) Get(ctx context.Context, request GetQueryRequest) (*Query, error) { + var query Query + path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &query) + return &query, err +} + +// List queries. +// +// Gets a list of queries accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *queriesPreviewImpl) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] { + + getNextPage := func(ctx context.Context, req ListQueriesRequest) (*ListQueryObjectsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListQueryObjectsResponse) []ListQueryObjectsResponseQuery { + return resp.Results + } + getNextReq := func(resp *ListQueryObjectsResponse) *ListQueriesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List queries. +// +// Gets a list of queries accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *queriesPreviewImpl) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListQueryObjectsResponseQuery](ctx, iterator) +} +func (a *queriesPreviewImpl) internalList(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { + var listQueryObjectsResponse ListQueryObjectsResponse + path := "/api/2.0preview/sql/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueryObjectsResponse) + return &listQueryObjectsResponse, err +} + +// List visualizations on a query. +// +// Gets a list of visualizations on a query. 
+func (a *queriesPreviewImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] { + + getNextPage := func(ctx context.Context, req ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListVisualizations(ctx, req) + } + getItems := func(resp *ListVisualizationsForQueryResponse) []Visualization { + return resp.Results + } + getNextReq := func(resp *ListVisualizationsForQueryResponse) *ListVisualizationsForQueryRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List visualizations on a query. +// +// Gets a list of visualizations on a query. +func (a *queriesPreviewImpl) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) { + iterator := a.ListVisualizations(ctx, request) + return listing.ToSlice[Visualization](ctx, iterator) +} +func (a *queriesPreviewImpl) internalListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { + var listVisualizationsForQueryResponse ListVisualizationsForQueryResponse + path := fmt.Sprintf("/api/2.0preview/sql/queries/%v/visualizations", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listVisualizationsForQueryResponse) + return &listVisualizationsForQueryResponse, err +} + +func (a *queriesPreviewImpl) Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) { + var query Query + path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &query) + return &query, err +} + +// unexported type that holds implementations of just QueryHistoryPreview API methods +type queryHistoryPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queryHistoryPreviewImpl) List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) { + var listQueriesResponse ListQueriesResponse + path := "/api/2.0preview/sql/history/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueriesResponse) + return &listQueriesResponse, err +} + +// unexported type that holds implementations of just QueryVisualizationsLegacyPreview API methods +type queryVisualizationsLegacyPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queryVisualizationsLegacyPreviewImpl) Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error) { + var legacyVisualization LegacyVisualization + path := "/api/2.0preview/preview/sql/visualizations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, 
queryParams, request, &legacyVisualization) + return &legacyVisualization, err +} + +func (a *queryVisualizationsLegacyPreviewImpl) Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *queryVisualizationsLegacyPreviewImpl) Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error) { + var legacyVisualization LegacyVisualization + path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) + return &legacyVisualization, err +} + +// unexported type that holds implementations of just QueryVisualizationsPreview API methods +type queryVisualizationsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *queryVisualizationsPreviewImpl) Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) { + var visualization Visualization + path := "/api/2.0preview/sql/visualizations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &visualization) + return &visualization, err +} + +func (a *queryVisualizationsPreviewImpl) Delete(ctx context.Context, request DeleteVisualizationRequest) error { + var empty Empty + path := fmt.Sprintf("/api/2.0preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *queryVisualizationsPreviewImpl) Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) { + var visualization Visualization + path := fmt.Sprintf("/api/2.0preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &visualization) + return &visualization, err +} + +// unexported type that holds implementations of just RedashConfigPreview API methods +type redashConfigPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *redashConfigPreviewImpl) GetConfig(ctx context.Context) (*ClientConfig, error) { + var clientConfig ClientConfig + path := "/api/2.0preview/redash-v2/config" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &clientConfig) + return &clientConfig, err +} + +// unexported type that holds implementations of just StatementExecutionPreview API methods +type statementExecutionPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *statementExecutionPreviewImpl) 
CancelExecution(ctx context.Context, request CancelExecutionRequest) error { + var cancelExecutionResponse CancelExecutionResponse + path := fmt.Sprintf("/api/2.0preview/sql/statements/%v/cancel", request.StatementId) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &cancelExecutionResponse) + return err +} + +func (a *statementExecutionPreviewImpl) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) { + var statementResponse StatementResponse + path := "/api/2.0preview/sql/statements/" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &statementResponse) + return &statementResponse, err +} + +func (a *statementExecutionPreviewImpl) GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error) { + var statementResponse StatementResponse + path := fmt.Sprintf("/api/2.0preview/sql/statements/%v", request.StatementId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &statementResponse) + return &statementResponse, err +} + +func (a *statementExecutionPreviewImpl) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error) { + var resultData ResultData + path := fmt.Sprintf("/api/2.0preview/sql/statements/%v/result/chunks/%v", request.StatementId, request.ChunkIndex) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &resultData) + return &resultData, err +} + +// unexported type that holds implementations of just WarehousesPreview API methods +type warehousesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *warehousesPreviewImpl) Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error) { + var createWarehouseResponse CreateWarehouseResponse + path := "/api/2.0preview/sql/warehouses" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createWarehouseResponse) + return &createWarehouseResponse, err +} + +func (a *warehousesPreviewImpl) Delete(ctx context.Context, request DeleteWarehouseRequest) error { + var deleteWarehouseResponse DeleteWarehouseResponse + path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteWarehouseResponse) + return err +} + +func (a *warehousesPreviewImpl) Edit(ctx context.Context, request EditWarehouseRequest) error { + var editWarehouseResponse EditWarehouseResponse + path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/edit", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := 
a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editWarehouseResponse) + return err +} + +func (a *warehousesPreviewImpl) Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error) { + var getWarehouseResponse GetWarehouseResponse + path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWarehouseResponse) + return &getWarehouseResponse, err +} + +func (a *warehousesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error) { + var getWarehousePermissionLevelsResponse GetWarehousePermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v/permissionLevels", request.WarehouseId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWarehousePermissionLevelsResponse) + return &getWarehousePermissionLevelsResponse, err +} + +func (a *warehousesPreviewImpl) GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error) { + var warehousePermissions WarehousePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &warehousePermissions) + return &warehousePermissions, err +} + +func (a *warehousesPreviewImpl) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error) { + var getWorkspaceWarehouseConfigResponse GetWorkspaceWarehouseConfigResponse + path := "/api/2.0preview/sql/config/warehouses" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getWorkspaceWarehouseConfigResponse) + return &getWorkspaceWarehouseConfigResponse, err +} + +// List warehouses. +// +// Lists all SQL warehouses that a user has manager permissions on. +func (a *warehousesPreviewImpl) List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] { + + getNextPage := func(ctx context.Context, req ListWarehousesRequest) (*ListWarehousesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListWarehousesResponse) []EndpointInfo { + return resp.Warehouses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List warehouses. +// +// Lists all SQL warehouses that a user has manager permissions on. 
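// [Editor's note — not generated code.] A sketch pairing ListAll below with
// the Stop endpoint, halting every warehouse reported as running. The
// EndpointInfo.State field and the StateRunning constant mirror the
// non-preview sql package and are assumptions here:
//
//	warehouses, err := w.ListAll(ctx, ListWarehousesRequest{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, wh := range warehouses {
//		if wh.State == StateRunning {
//			if err := w.Stop(ctx, StopRequest{Id: wh.Id}); err != nil {
//				log.Printf("stop %s: %v", wh.Id, err)
//			}
//		}
//	}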
+func (a *warehousesPreviewImpl) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[EndpointInfo](ctx, iterator) +} +func (a *warehousesPreviewImpl) internalList(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { + var listWarehousesResponse ListWarehousesResponse + path := "/api/2.0preview/sql/warehouses" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listWarehousesResponse) + return &listWarehousesResponse, err +} + +func (a *warehousesPreviewImpl) SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { + var warehousePermissions WarehousePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &warehousePermissions) + return &warehousePermissions, err +} + +func (a *warehousesPreviewImpl) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error { + var setWorkspaceWarehouseConfigResponse SetWorkspaceWarehouseConfigResponse + path := "/api/2.0preview/sql/config/warehouses" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &setWorkspaceWarehouseConfigResponse) + return err +} + +func (a *warehousesPreviewImpl) Start(ctx context.Context, request StartRequest) error { + var startWarehouseResponse StartWarehouseResponse + path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/start", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &startWarehouseResponse) + return err +} + +func (a *warehousesPreviewImpl) Stop(ctx context.Context, request StopRequest) error { + var stopWarehouseResponse StopWarehouseResponse + path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/stop", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &stopWarehouseResponse) + return err +} + +func (a *warehousesPreviewImpl) UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { + var warehousePermissions WarehousePermissions + path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &warehousePermissions) + return &warehousePermissions, err +} diff --git a/sql/v2preview/model.go b/sql/v2preview/model.go new file mode 100755 index 000000000..c497297a0 --- /dev/null +++ b/sql/v2preview/model.go @@ -0,0 +1,4823 @@ +// Code generated from OpenAPI specs by Databricks 
SDK Generator. DO NOT EDIT. + +package sqlpreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type AccessControl struct { + GroupName string `json:"group_name,omitempty"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionLevel PermissionLevel `json:"permission_level,omitempty"` + + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AccessControl) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AccessControl) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Alert struct { + // Trigger conditions of the alert. + Condition *AlertCondition `json:"condition,omitempty"` + // The timestamp indicating when the alert was created. + CreateTime string `json:"create_time,omitempty"` + // Custom body of alert notification, if it exists. See [here] for custom + // templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomBody string `json:"custom_body,omitempty"` + // Custom subject of alert notification, if it exists. This can include + // email subject entries and Slack notification headers, for example. See + // [here] for custom templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomSubject string `json:"custom_subject,omitempty"` + // The display name of the alert. + DisplayName string `json:"display_name,omitempty"` + // UUID identifying the alert. + Id string `json:"id,omitempty"` + // The workspace state of the alert. Used for tracking trashed status. + LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` + // Whether to notify alert subscribers when the alert returns to normal. + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + // The owner's username. This field is set to "Unavailable" if the user has + // been deleted. + OwnerUserName string `json:"owner_user_name,omitempty"` + // The workspace path of the folder containing the alert. + ParentPath string `json:"parent_path,omitempty"` + // UUID of the query attached to the alert. + QueryId string `json:"query_id,omitempty"` + // Number of seconds an alert must wait after being triggered to rearm + // itself. After rearming, it can be triggered again. If 0 or not specified, + // the alert will not be triggered again. + SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + // Current state of the alert's trigger status. This field is set to UNKNOWN + // if the alert has not yet been evaluated or it ran into an error during + // the last evaluation. + State AlertState `json:"state,omitempty"` + // Timestamp when the alert was last triggered, if the alert has been + // triggered before. + TriggerTime string `json:"trigger_time,omitempty"` + // The timestamp indicating when the alert was updated. + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Alert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Alert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertCondition struct { + // Alert state if result is empty. + EmptyResultState AlertState `json:"empty_result_state,omitempty"` + // Operator used for comparison in alert evaluation. + Op AlertOperator `json:"op,omitempty"` + // Name of the column from the query result to use for comparison in alert + // evaluation.
+ Operand *AlertConditionOperand `json:"operand,omitempty"` + // Threshold value used for comparison in alert evaluation. + Threshold *AlertConditionThreshold `json:"threshold,omitempty"` +} + +type AlertConditionOperand struct { + Column *AlertOperandColumn `json:"column,omitempty"` +} + +type AlertConditionThreshold struct { + Value *AlertOperandValue `json:"value,omitempty"` +} + +type AlertOperandColumn struct { + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AlertOperandColumn) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertOperandColumn) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertOperandValue struct { + BoolValue bool `json:"bool_value,omitempty"` + + DoubleValue float64 `json:"double_value,omitempty"` + + StringValue string `json:"string_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AlertOperandValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertOperandValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertOperator string + +const AlertOperatorEqual AlertOperator = `EQUAL` + +const AlertOperatorGreaterThan AlertOperator = `GREATER_THAN` + +const AlertOperatorGreaterThanOrEqual AlertOperator = `GREATER_THAN_OR_EQUAL` + +const AlertOperatorIsNull AlertOperator = `IS_NULL` + +const AlertOperatorLessThan AlertOperator = `LESS_THAN` + +const AlertOperatorLessThanOrEqual AlertOperator = `LESS_THAN_OR_EQUAL` + +const AlertOperatorNotEqual AlertOperator = `NOT_EQUAL` + +// String representation for [fmt.Print] +func (f *AlertOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertOperator) Set(v string) error { + switch v { + case `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL`: + *f = AlertOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "IS_NULL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"`, v) + } +} + +// Type always returns AlertOperator to satisfy [pflag.Value] interface +func (f *AlertOperator) Type() string { + return "AlertOperator" +} + +// Alert configuration options. +type AlertOptions struct { + // Name of column in the query result to compare in alert evaluation. + Column string `json:"column"` + // Custom body of alert notification, if it exists. See [here] for custom + // templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomBody string `json:"custom_body,omitempty"` + // Custom subject of alert notification, if it exists. This includes email + // subject, Slack notification header, etc. See [here] for custom templating + // instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomSubject string `json:"custom_subject,omitempty"` + // State that alert evaluates to when query result is empty. + EmptyResultState AlertOptionsEmptyResultState `json:"empty_result_state,omitempty"` + // Whether or not the alert is muted. If an alert is muted, it will not + // notify users and notification destinations when triggered. + Muted bool `json:"muted,omitempty"` + // Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`, + // `!=` + Op string `json:"op"` + // Value used to compare in alert evaluation. Supported types include + // strings (e.g.
'foobar'), floats (e.g. 123.4), and booleans (true). + Value any `json:"value"` + + ForceSendFields []string `json:"-"` +} + +func (s *AlertOptions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertOptions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// State that alert evaluates to when query result is empty. +type AlertOptionsEmptyResultState string + +const AlertOptionsEmptyResultStateOk AlertOptionsEmptyResultState = `ok` + +const AlertOptionsEmptyResultStateTriggered AlertOptionsEmptyResultState = `triggered` + +const AlertOptionsEmptyResultStateUnknown AlertOptionsEmptyResultState = `unknown` + +// String representation for [fmt.Print] +func (f *AlertOptionsEmptyResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertOptionsEmptyResultState) Set(v string) error { + switch v { + case `ok`, `triggered`, `unknown`: + *f = AlertOptionsEmptyResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ok", "triggered", "unknown"`, v) + } +} + +// Type always returns AlertOptionsEmptyResultState to satisfy [pflag.Value] interface +func (f *AlertOptionsEmptyResultState) Type() string { + return "AlertOptionsEmptyResultState" +} + +type AlertQuery struct { + // The timestamp when this query was created. + CreatedAt string `json:"created_at,omitempty"` + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId string `json:"data_source_id,omitempty"` + // General description that conveys additional information about this query + // such as usage notes. + Description string `json:"description,omitempty"` + // Query ID. + Id string `json:"id,omitempty"` + // Indicates whether the query is trashed. Trashed queries can't be used in + // dashboards or appear in search results. If this boolean is `true`, the + // `options` property for this query includes a `moved_to_trash_at` + // timestamp. Trashed queries are permanently deleted after 30 days. + IsArchived bool `json:"is_archived,omitempty"` + // Whether the query is a draft. Draft queries only appear in list views for + // their owners. Visualizations from draft queries cannot appear on + // dashboards. + IsDraft bool `json:"is_draft,omitempty"` + // Text parameter types are not safe from SQL injection for all types of + // data source. Set this Boolean parameter to `true` if a query either does + // not use any text type parameters or uses a data source type where text + // type parameters are handled safely. + IsSafe bool `json:"is_safe,omitempty"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name string `json:"name,omitempty"` + + Options *QueryOptions `json:"options,omitempty"` + // The text of the query to be run. + Query string `json:"query,omitempty"` + + Tags []string `json:"tags,omitempty"` + // The timestamp at which this query was last updated. + UpdatedAt string `json:"updated_at,omitempty"` + // The ID of the user who owns the query.
+ UserId int `json:"user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AlertQuery) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertQuery) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertState string + +const AlertStateOk AlertState = `OK` + +const AlertStateTriggered AlertState = `TRIGGERED` + +const AlertStateUnknown AlertState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *AlertState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertState) Set(v string) error { + switch v { + case `OK`, `TRIGGERED`, `UNKNOWN`: + *f = AlertState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OK", "TRIGGERED", "UNKNOWN"`, v) + } +} + +// Type always returns AlertState to satisfy [pflag.Value] interface +func (f *AlertState) Type() string { + return "AlertState" +} + +// Describes metadata for a particular chunk, within a result set; this +// structure is used both within a manifest, and when fetching individual chunk +// data or links. +type BaseChunkInfo struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount int64 `json:"byte_count,omitempty"` + // The position within the sequence of result set chunks. + ChunkIndex int `json:"chunk_index,omitempty"` + // The number of rows within the result chunk. + RowCount int64 `json:"row_count,omitempty"` + // The starting row offset within the result set. + RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BaseChunkInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BaseChunkInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Cancel statement execution +type CancelExecutionRequest struct { + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId string `json:"-" url:"-"` +} + +type CancelExecutionResponse struct { +} + +// Configures the channel name and DBSQL version of the warehouse. +// CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified. +type Channel struct { + DbsqlVersion string `json:"dbsql_version,omitempty"` + + Name ChannelName `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Channel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Channel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Details about a Channel. +type ChannelInfo struct { + // DB SQL Version the Channel is mapped to. 
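+ // For example (illustrative), a warehouse pinned to a custom channel might + // report {"name": "CHANNEL_NAME_CUSTOM", "dbsql_version": "2024.35"}; the + // version string here is an assumed placeholder.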
+ DbsqlVersion string `json:"dbsql_version,omitempty"` + // Name of the channel + Name ChannelName `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ChannelInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ChannelInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ChannelName string + +const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT` + +const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM` + +const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW` + +const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS` + +// String representation for [fmt.Print] +func (f *ChannelName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ChannelName) Set(v string) error { + switch v { + case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_PREVIOUS`: + *f = ChannelName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS"`, v) + } +} + +// Type always returns ChannelName to satisfy [pflag.Value] interface +func (f *ChannelName) Type() string { + return "ChannelName" +} + +type ClientConfig struct { + AllowCustomJsVisualizations bool `json:"allow_custom_js_visualizations,omitempty"` + + AllowDownloads bool `json:"allow_downloads,omitempty"` + + AllowExternalShares bool `json:"allow_external_shares,omitempty"` + + AllowSubscriptions bool `json:"allow_subscriptions,omitempty"` + + DateFormat string `json:"date_format,omitempty"` + + DateTimeFormat string `json:"date_time_format,omitempty"` + + DisablePublish bool `json:"disable_publish,omitempty"` + + EnableLegacyAutodetectTypes bool `json:"enable_legacy_autodetect_types,omitempty"` + + FeatureShowPermissionsControl bool `json:"feature_show_permissions_control,omitempty"` + + HidePlotlyModeBar bool `json:"hide_plotly_mode_bar,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClientConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClientConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ColumnInfo struct { + // The name of the column. + Name string `json:"name,omitempty"` + // The ordinal position of the column (starting at position 0). + Position int `json:"position,omitempty"` + // The format of the interval type. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // The name of the base data type. This doesn't include details for complex + // types such as STRUCT, MAP or ARRAY. + TypeName ColumnInfoTypeName `json:"type_name,omitempty"` + // Specifies the number of digits in a number. This applies to the DECIMAL + // type. + TypePrecision int `json:"type_precision,omitempty"` + // Specifies the number of digits to the right of the decimal point in a + // number. This applies to the DECIMAL type. + TypeScale int `json:"type_scale,omitempty"` + // The full SQL type specification. + TypeText string `json:"type_text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The name of the base data type. This doesn't include details for complex +// types such as STRUCT, MAP or ARRAY. 
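+// For example (illustrative), a column declared as DECIMAL(10,2) would be +// reported with type_name DECIMAL, type_precision 10, type_scale 2, and +// type_text "DECIMAL(10,2)".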
+type ColumnInfoTypeName string + +const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY` + +const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY` + +const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN` + +const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE` + +const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR` + +const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE` + +const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL` + +const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE` + +const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT` + +const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT` + +const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL` + +const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG` + +const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP` + +const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL` + +const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT` + +const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING` + +const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT` + +const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP` + +const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnInfoTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnInfoTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TIMESTAMP`, `USER_DEFINED_TYPE`: + *f = ColumnInfoTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TIMESTAMP", "USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface +func (f *ColumnInfoTypeName) Type() string { + return "ColumnInfoTypeName" +} + +type CreateAlert struct { + // Name of the alert. + Name string `json:"name"` + // Alert configuration options. + Options AlertOptions `json:"options"` + // The identifier of the workspace folder containing the object. + Parent string `json:"parent,omitempty"` + // Query ID. + QueryId string `json:"query_id"` + // Number of seconds after being triggered before the alert rearms itself + // and can be triggered again. If `null`, alert will never be triggered + // again. + Rearm int `json:"rearm,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateAlertRequest struct { + Alert *CreateAlertRequestAlert `json:"alert,omitempty"` +} + +type CreateAlertRequestAlert struct { + // Trigger conditions of the alert. + Condition *AlertCondition `json:"condition,omitempty"` + // Custom body of alert notification, if it exists. See [here] for custom + // templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomBody string `json:"custom_body,omitempty"` + // Custom subject of alert notification, if it exists. This can include + // email subject entries and Slack notification headers, for example. See + // [here] for custom templating instructions. 
+ // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomSubject string `json:"custom_subject,omitempty"` + // The display name of the alert. + DisplayName string `json:"display_name,omitempty"` + // Whether to notify alert subscribers when the alert returns to normal. + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + // The workspace path of the folder containing the alert. + ParentPath string `json:"parent_path,omitempty"` + // UUID of the query attached to the alert. + QueryId string `json:"query_id,omitempty"` + // Number of seconds an alert must wait after being triggered to rearm + // itself. After rearming, it can be triggered again. If 0 or not specified, + // the alert will not be triggered again. + SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateAlertRequestAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAlertRequestAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateQueryRequest struct { + Query *CreateQueryRequestQuery `json:"query,omitempty"` +} + +type CreateQueryRequestQuery struct { + // Whether to apply a 1000 row limit to the query result. + ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"` + // Name of the catalog where this query will be executed. + Catalog string `json:"catalog,omitempty"` + // General description that conveys additional information about this query + // such as usage notes. + Description string `json:"description,omitempty"` + // Display name of the query that appears in list views, widget headings, + // and on the query page. + DisplayName string `json:"display_name,omitempty"` + // List of query parameter definitions. + Parameters []QueryParameter `json:"parameters,omitempty"` + // Workspace path of the workspace folder containing the object. + ParentPath string `json:"parent_path,omitempty"` + // Text of the query to be run. + QueryText string `json:"query_text,omitempty"` + // Sets the "Run as" role for the object. + RunAsMode RunAsMode `json:"run_as_mode,omitempty"` + // Name of the schema where this query will be executed. + Schema string `json:"schema,omitempty"` + + Tags []string `json:"tags,omitempty"` + // ID of the SQL warehouse attached to the query. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateQueryRequestQuery) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateQueryRequestQuery) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Add visualization to a query +type CreateQueryVisualizationsLegacyRequest struct { + // A short description of this visualization. This is not displayed in the + // UI. + Description string `json:"description,omitempty"` + // The name of the visualization that appears on dashboards and the query + // screen. + Name string `json:"name,omitempty"` + // The options object varies widely from one visualization type to the next + // and is unsupported. Databricks does not recommend modifying visualization + // settings in JSON. + Options any `json:"options"` + // The identifier returned by :method:queries/create + QueryId string `json:"query_id"` + // The type of visualization: chart, table, pivot table, and so on.
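+ // For example, "chart" (illustrative; the accepted values are not + // enumerated in this spec).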
+ Type string `json:"type"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateQueryVisualizationsLegacyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateQueryVisualizationsLegacyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateVisualizationRequest struct { + Visualization *CreateVisualizationRequestVisualization `json:"visualization,omitempty"` +} + +type CreateVisualizationRequestVisualization struct { + // The display name of the visualization. + DisplayName string `json:"display_name,omitempty"` + // UUID of the query that the visualization is attached to. + QueryId string `json:"query_id,omitempty"` + // The visualization options vary widely from one visualization type to + // the next and are unsupported. Databricks does not recommend modifying + // visualization options directly. + SerializedOptions string `json:"serialized_options,omitempty"` + // The visualization query plan varies widely from one visualization type to + // the next and is unsupported. Databricks does not recommend modifying the + // visualization query plan directly. + SerializedQueryPlan string `json:"serialized_query_plan,omitempty"` + // The type of visualization: counter, table, funnel, and so on. + Type string `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateVisualizationRequestVisualization) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateWarehouseRequest struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be >= 0 mins for serverless warehouses - Must be + // == 0 or >= 10 mins for non-serverless warehouses - 0 indicates no + // autostop. + // + // Defaults to 120 mins + AutoStopMins int `json:"auto_stop_mins,omitempty"` + // Channel Details + Channel *Channel `json:"channel,omitempty"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize string `json:"cluster_size,omitempty"` + // warehouse creator name + CreatorName string `json:"creator_name,omitempty"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton bool `json:"enable_photon,omitempty"` + // Configures whether the warehouse should use serverless compute + EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters int `json:"max_num_clusters,omitempty"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse.
Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters int `json:"min_num_clusters,omitempty"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name string `json:"name,omitempty"` + // Configures whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `json:"tags,omitempty"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType CreateWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateWarehouseRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateWarehouseRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. +type CreateWarehouseRequestWarehouseType string + +const CreateWarehouseRequestWarehouseTypeClassic CreateWarehouseRequestWarehouseType = `CLASSIC` + +const CreateWarehouseRequestWarehouseTypePro CreateWarehouseRequestWarehouseType = `PRO` + +const CreateWarehouseRequestWarehouseTypeTypeUnspecified CreateWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *CreateWarehouseRequestWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateWarehouseRequestWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = CreateWarehouseRequestWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns CreateWarehouseRequestWarehouseType to satisfy [pflag.Value] interface +func (f *CreateWarehouseRequestWarehouseType) Type() string { + return "CreateWarehouseRequestWarehouseType" +} + +type CreateWarehouseResponse struct { + // Id for the SQL warehouse. This value is unique across all SQL warehouses. + Id string `json:"id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateWarehouseResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateWarehouseResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateWidget struct { + // Dashboard ID returned by :method:dashboards/create. + DashboardId string `json:"dashboard_id"` + // Widget ID returned by :method:dashboardwidgets/create + Id string `json:"-" url:"-"` + + Options WidgetOptions `json:"options"` + // If this is a textbox widget, the application displays this text. This + // field is ignored if the widget contains a visualization in the + // `visualization` field.
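+ // For example, a short heading such as "## Weekly KPIs" (illustrative; + // markdown support is an assumption, not stated in this spec).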
+ Text string `json:"text,omitempty"` + // Query visualization ID returned by :method:queryvisualizations/create. + VisualizationId string `json:"visualization_id,omitempty"` + // Width of a widget + Width int `json:"width"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateWidget) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateWidget) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A JSON object representing a dashboard containing visualization and text box +// widgets. +type Dashboard struct { + // Whether the authenticated user can edit the dashboard definition. + CanEdit bool `json:"can_edit,omitempty"` + // Timestamp when this dashboard was created. + CreatedAt string `json:"created_at,omitempty"` + // In the web application, query filters that share a name are coupled to a + // single selection box if this value is `true`. + DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"` + // The ID for this dashboard. + Id string `json:"id,omitempty"` + // Indicates whether a dashboard is trashed. Trashed dashboards won't appear + // in list views. If this boolean is `true`, the `options` property for this + // dashboard includes a `moved_to_trash_at` timestamp. Items in trash are + // permanently deleted after 30 days. + IsArchived bool `json:"is_archived,omitempty"` + // Whether a dashboard is a draft. Draft dashboards only appear in list + // views for their owners. + IsDraft bool `json:"is_draft,omitempty"` + // Indicates whether this dashboard object appears in the current user's + // favorites list. This flag determines whether the star icon for favorites + // is selected. + IsFavorite bool `json:"is_favorite,omitempty"` + // The title of the dashboard that appears in list views and at the top of + // the dashboard page. + Name string `json:"name,omitempty"` + + Options *DashboardOptions `json:"options,omitempty"` + // The identifier of the workspace folder containing the object. + Parent string `json:"parent,omitempty"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionTier PermissionLevel `json:"permission_tier,omitempty"` + // URL slug. Usually mirrors the dashboard name with dashes (`-`) instead of + // spaces. Appears in the URL for this dashboard. + Slug string `json:"slug,omitempty"` + + Tags []string `json:"tags,omitempty"` + // Timestamp when this dashboard was last updated. + UpdatedAt string `json:"updated_at,omitempty"` + + User *User `json:"user,omitempty"` + // The ID of the user who owns the dashboard. + UserId int `json:"user_id,omitempty"` + + Widgets []Widget `json:"widgets,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Dashboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Dashboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardEditContent struct { + DashboardId string `json:"-" url:"-"` + // The title of this dashboard that appears in list views and at the top of + // the dashboard page. + Name string `json:"name,omitempty"` + // Sets the **Run as** role for the object.
Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `json:"run_as_role,omitempty"` + + Tags []string `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DashboardEditContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardEditContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardOptions struct { + // The timestamp when this dashboard was moved to trash. Only present when + // the `is_archived` property is `true`. Trashed items are deleted after + // thirty days. + MovedToTrashAt string `json:"moved_to_trash_at,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DashboardOptions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardOptions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardPostContent struct { + // Indicates whether the dashboard filters are enabled + DashboardFiltersEnabled bool `json:"dashboard_filters_enabled,omitempty"` + // Indicates whether this dashboard object should appear in the current + // user's favorites list. + IsFavorite bool `json:"is_favorite,omitempty"` + // The title of this dashboard that appears in list views and at the top of + // the dashboard page. + Name string `json:"name"` + // The identifier of the workspace folder containing the object. + Parent string `json:"parent,omitempty"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `json:"run_as_role,omitempty"` + + Tags []string `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DashboardPostContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardPostContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A JSON object representing a DBSQL data source / SQL warehouse. +type DataSource struct { + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + Id string `json:"id,omitempty"` + // The string name of this data source / SQL warehouse as it appears in the + // Databricks SQL web application. + Name string `json:"name,omitempty"` + // Reserved for internal use. + PauseReason string `json:"pause_reason,omitempty"` + // Reserved for internal use. + Paused int `json:"paused,omitempty"` + // Reserved for internal use. + SupportsAutoLimit bool `json:"supports_auto_limit,omitempty"` + // Reserved for internal use. + Syntax string `json:"syntax,omitempty"` + // The type of data source. For SQL warehouses, this will be + // `databricks_internal`. + Type string `json:"type,omitempty"` + // Reserved for internal use. + ViewOnly bool `json:"view_only,omitempty"` + // The ID of the associated SQL warehouse, if this data source is backed by + // a SQL warehouse. 
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DataSource) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DataSource) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DatePrecision string + +const DatePrecisionDayPrecision DatePrecision = `DAY_PRECISION` + +const DatePrecisionMinutePrecision DatePrecision = `MINUTE_PRECISION` + +const DatePrecisionSecondPrecision DatePrecision = `SECOND_PRECISION` + +// String representation for [fmt.Print] +func (f *DatePrecision) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DatePrecision) Set(v string) error { + switch v { + case `DAY_PRECISION`, `MINUTE_PRECISION`, `SECOND_PRECISION`: + *f = DatePrecision(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAY_PRECISION", "MINUTE_PRECISION", "SECOND_PRECISION"`, v) + } +} + +// Type always returns DatePrecision to satisfy [pflag.Value] interface +func (f *DatePrecision) Type() string { + return "DatePrecision" +} + +type DateRange struct { + End string `json:"end"` + + Start string `json:"start"` +} + +type DateRangeValue struct { + // Manually specified date-time range value. + DateRangeValue *DateRange `json:"date_range_value,omitempty"` + // Dynamic date-time range value based on current date-time. + DynamicDateRangeValue DateRangeValueDynamicDateRange `json:"dynamic_date_range_value,omitempty"` + // Date-time precision to format the value into when the query is run. + // Defaults to DAY_PRECISION (YYYY-MM-DD). + Precision DatePrecision `json:"precision,omitempty"` + + StartDayOfWeek int `json:"start_day_of_week,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DateRangeValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DateRangeValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DateRangeValueDynamicDateRange string + +const DateRangeValueDynamicDateRangeLast12Months DateRangeValueDynamicDateRange = `LAST_12_MONTHS` + +const DateRangeValueDynamicDateRangeLast14Days DateRangeValueDynamicDateRange = `LAST_14_DAYS` + +const DateRangeValueDynamicDateRangeLast24Hours DateRangeValueDynamicDateRange = `LAST_24_HOURS` + +const DateRangeValueDynamicDateRangeLast30Days DateRangeValueDynamicDateRange = `LAST_30_DAYS` + +const DateRangeValueDynamicDateRangeLast60Days DateRangeValueDynamicDateRange = `LAST_60_DAYS` + +const DateRangeValueDynamicDateRangeLast7Days DateRangeValueDynamicDateRange = `LAST_7_DAYS` + +const DateRangeValueDynamicDateRangeLast8Hours DateRangeValueDynamicDateRange = `LAST_8_HOURS` + +const DateRangeValueDynamicDateRangeLast90Days DateRangeValueDynamicDateRange = `LAST_90_DAYS` + +const DateRangeValueDynamicDateRangeLastHour DateRangeValueDynamicDateRange = `LAST_HOUR` + +const DateRangeValueDynamicDateRangeLastMonth DateRangeValueDynamicDateRange = `LAST_MONTH` + +const DateRangeValueDynamicDateRangeLastWeek DateRangeValueDynamicDateRange = `LAST_WEEK` + +const DateRangeValueDynamicDateRangeLastYear DateRangeValueDynamicDateRange = `LAST_YEAR` + +const DateRangeValueDynamicDateRangeThisMonth DateRangeValueDynamicDateRange = `THIS_MONTH` + +const DateRangeValueDynamicDateRangeThisWeek DateRangeValueDynamicDateRange = `THIS_WEEK` + +const DateRangeValueDynamicDateRangeThisYear DateRangeValueDynamicDateRange = `THIS_YEAR` + +const DateRangeValueDynamicDateRangeToday DateRangeValueDynamicDateRange = `TODAY` + 
+const DateRangeValueDynamicDateRangeYesterday DateRangeValueDynamicDateRange = `YESTERDAY` + +// String representation for [fmt.Print] +func (f *DateRangeValueDynamicDateRange) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DateRangeValueDynamicDateRange) Set(v string) error { + switch v { + case `LAST_12_MONTHS`, `LAST_14_DAYS`, `LAST_24_HOURS`, `LAST_30_DAYS`, `LAST_60_DAYS`, `LAST_7_DAYS`, `LAST_8_HOURS`, `LAST_90_DAYS`, `LAST_HOUR`, `LAST_MONTH`, `LAST_WEEK`, `LAST_YEAR`, `THIS_MONTH`, `THIS_WEEK`, `THIS_YEAR`, `TODAY`, `YESTERDAY`: + *f = DateRangeValueDynamicDateRange(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LAST_12_MONTHS", "LAST_14_DAYS", "LAST_24_HOURS", "LAST_30_DAYS", "LAST_60_DAYS", "LAST_7_DAYS", "LAST_8_HOURS", "LAST_90_DAYS", "LAST_HOUR", "LAST_MONTH", "LAST_WEEK", "LAST_YEAR", "THIS_MONTH", "THIS_WEEK", "THIS_YEAR", "TODAY", "YESTERDAY"`, v) + } +} + +// Type always returns DateRangeValueDynamicDateRange to satisfy [pflag.Value] interface +func (f *DateRangeValueDynamicDateRange) Type() string { + return "DateRangeValueDynamicDateRange" +} + +type DateValue struct { + // Manually specified date-time value. + DateValue string `json:"date_value,omitempty"` + // Dynamic date-time value based on current date-time. + DynamicDateValue DateValueDynamicDate `json:"dynamic_date_value,omitempty"` + // Date-time precision to format the value into when the query is run. + // Defaults to DAY_PRECISION (YYYY-MM-DD). + Precision DatePrecision `json:"precision,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DateValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DateValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DateValueDynamicDate string + +const DateValueDynamicDateNow DateValueDynamicDate = `NOW` + +const DateValueDynamicDateYesterday DateValueDynamicDate = `YESTERDAY` + +// String representation for [fmt.Print] +func (f *DateValueDynamicDate) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DateValueDynamicDate) Set(v string) error { + switch v { + case `NOW`, `YESTERDAY`: + *f = DateValueDynamicDate(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NOW", "YESTERDAY"`, v) + } +} + +// Type always returns DateValueDynamicDate to satisfy [pflag.Value] interface +func (f *DateValueDynamicDate) Type() string { + return "DateValueDynamicDate" +} + +// Delete an alert +type DeleteAlertsLegacyRequest struct { + AlertId string `json:"-" url:"-"` +} + +// Remove a dashboard +type DeleteDashboardRequest struct { + DashboardId string `json:"-" url:"-"` +} + +// Remove widget +type DeleteDashboardWidgetRequest struct { + // Widget ID returned by :method:dashboardwidgets/create + Id string `json:"-" url:"-"` +} + +// Delete a query +type DeleteQueriesLegacyRequest struct { + QueryId string `json:"-" url:"-"` +} + +// Remove visualization +type DeleteQueryVisualizationsLegacyRequest struct { + // Visualization ID returned by :method:queryvisualizations/create + Id string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Remove a visualization +type DeleteVisualizationRequest struct { + Id string `json:"-" url:"-"` +} + +// Delete a warehouse +type DeleteWarehouseRequest struct { + // Required. Id of the SQL warehouse.
+ Id string `json:"-" url:"-"` +} + +type DeleteWarehouseResponse struct { +} + +type Disposition string + +const DispositionExternalLinks Disposition = `EXTERNAL_LINKS` + +const DispositionInline Disposition = `INLINE` + +// String representation for [fmt.Print] +func (f *Disposition) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Disposition) Set(v string) error { + switch v { + case `EXTERNAL_LINKS`, `INLINE`: + *f = Disposition(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL_LINKS", "INLINE"`, v) + } +} + +// Type always returns Disposition to satisfy [pflag.Value] interface +func (f *Disposition) Type() string { + return "Disposition" +} + +type EditAlert struct { + AlertId string `json:"-" url:"-"` + // Name of the alert. + Name string `json:"name"` + // Alert configuration options. + Options AlertOptions `json:"options"` + // Query ID. + QueryId string `json:"query_id"` + // Number of seconds after being triggered before the alert rearms itself + // and can be triggered again. If `null`, alert will never be triggered + // again. + Rearm int `json:"rearm,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EditAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EditAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EditWarehouseRequest struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins int `json:"auto_stop_mins,omitempty"` + // Channel Details + Channel *Channel `json:"channel,omitempty"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize string `json:"cluster_size,omitempty"` + // warehouse creator name + CreatorName string `json:"creator_name,omitempty"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton bool `json:"enable_photon,omitempty"` + // Configures whether the warehouse should use serverless compute. + EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` + // Required. Id of the warehouse to configure. + Id string `json:"-" url:"-"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters int `json:"max_num_clusters,omitempty"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. 
+ // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters int `json:"min_num_clusters,omitempty"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name string `json:"name,omitempty"` + // Configures whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `json:"tags,omitempty"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType EditWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EditWarehouseRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EditWarehouseRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. +type EditWarehouseRequestWarehouseType string + +const EditWarehouseRequestWarehouseTypeClassic EditWarehouseRequestWarehouseType = `CLASSIC` + +const EditWarehouseRequestWarehouseTypePro EditWarehouseRequestWarehouseType = `PRO` + +const EditWarehouseRequestWarehouseTypeTypeUnspecified EditWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *EditWarehouseRequestWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EditWarehouseRequestWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = EditWarehouseRequestWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns EditWarehouseRequestWarehouseType to satisfy [pflag.Value] interface +func (f *EditWarehouseRequestWarehouseType) Type() string { + return "EditWarehouseRequestWarehouseType" +} + +type EditWarehouseResponse struct { +} + +// Represents an empty message, similar to google.protobuf.Empty, which is not +// available in this SDK right now. +type Empty struct { +} + +type EndpointConfPair struct { + Key string `json:"key,omitempty"` + + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointConfPair) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointConfPair) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EndpointHealth struct { + // Details about errors that are causing current degraded/failed status. + Details string `json:"details,omitempty"` + // The reason for failure to bring up clusters for this warehouse. This is + // available when status is `FAILED` and sometimes when it is `DEGRADED`. + FailureReason *TerminationReason `json:"failure_reason,omitempty"` + // Deprecated. Split into `summary` and `details` for security. + Message string `json:"message,omitempty"` + // Health status of the warehouse.
+ Status Status `json:"status,omitempty"` + // A short summary of the health status in case of degraded/failed + // warehouses. + Summary string `json:"summary,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointHealth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointHealth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EndpointInfo struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins int `json:"auto_stop_mins,omitempty"` + // Channel Details + Channel *Channel `json:"channel,omitempty"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize string `json:"cluster_size,omitempty"` + // warehouse creator name + CreatorName string `json:"creator_name,omitempty"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton bool `json:"enable_photon,omitempty"` + // Configures whether the warehouse should use serverless compute + EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` + // Optional health status. Assume the warehouse is healthy if this field is + // not set. + Health *EndpointHealth `json:"health,omitempty"` + // unique identifier for warehouse + Id string `json:"id,omitempty"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // the jdbc connection string for this warehouse + JdbcUrl string `json:"jdbc_url,omitempty"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters int `json:"max_num_clusters,omitempty"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters int `json:"min_num_clusters,omitempty"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name string `json:"name,omitempty"` + // current number of active sessions for the warehouse + NumActiveSessions int64 `json:"num_active_sessions,omitempty"` + // current number of clusters running for the service + NumClusters int `json:"num_clusters,omitempty"` + // ODBC parameters for the SQL warehouse + OdbcParams *OdbcParams `json:"odbc_params,omitempty"` + // Configures whether the warehouse should use spot instances.
+ SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"` + // State of the warehouse + State State `json:"state,omitempty"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `json:"tags,omitempty"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType EndpointInfoWarehouseType `json:"warehouse_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. +type EndpointInfoWarehouseType string + +const EndpointInfoWarehouseTypeClassic EndpointInfoWarehouseType = `CLASSIC` + +const EndpointInfoWarehouseTypePro EndpointInfoWarehouseType = `PRO` + +const EndpointInfoWarehouseTypeTypeUnspecified EndpointInfoWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *EndpointInfoWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointInfoWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = EndpointInfoWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns EndpointInfoWarehouseType to satisfy [pflag.Value] interface +func (f *EndpointInfoWarehouseType) Type() string { + return "EndpointInfoWarehouseType" +} + +type EndpointTagPair struct { + Key string `json:"key,omitempty"` + + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointTagPair) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointTagPair) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EndpointTags struct { + CustomTags []EndpointTagPair `json:"custom_tags,omitempty"` +} + +type EnumValue struct { + // List of valid query parameter values, newline delimited. + EnumOptions string `json:"enum_options,omitempty"` + // If specified, allows multiple values to be selected for this parameter. + MultiValuesOptions *MultiValuesOptions `json:"multi_values_options,omitempty"` + // List of selected query parameter values. + Values []string `json:"values,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EnumValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnumValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExecuteStatementRequest struct { + // Applies the given byte limit to the statement's result size. Byte counts + // are based on internal data representations and might not match the final + // size in the requested `format`. If the result was truncated due to the + // byte limit, then `truncated` in the response is set to `true`. When using + // `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is + // applied if `byte_limit` is not explicitly set.
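+ // For example (illustrative), "byte_limit": 10737418240 caps the result at + // 10 GiB (10 * 1024^3 bytes).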
+ ByteLimit int64 `json:"byte_limit,omitempty"` + // Sets default catalog for statement execution, similar to [`USE CATALOG`] + // in SQL. + // + // [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html + Catalog string `json:"catalog,omitempty"` + + Disposition Disposition `json:"disposition,omitempty"` + // Statement execution supports three result formats: `JSON_ARRAY` + // (default), `ARROW_STREAM`, and `CSV`. + // + // Important: The formats `ARROW_STREAM` and `CSV` are supported only with + // `EXTERNAL_LINKS` disposition. `JSON_ARRAY` is supported in `INLINE` and + // `EXTERNAL_LINKS` disposition. + // + // When specifying `format=JSON_ARRAY`, result data will be formatted as an + // array of arrays of values, where each value is either the *string + // representation* of a value, or `null`. For example, the output of `SELECT + // concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)` + // would look like this: + // + // ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null + // ], ] ``` + // + // When specifying `format=JSON_ARRAY` and `disposition=EXTERNAL_LINKS`, + // each chunk in the result contains compact JSON with no indentation or + // extra whitespace. + // + // When specifying `format=ARROW_STREAM` and `disposition=EXTERNAL_LINKS`, + // each chunk in the result will be formatted as Apache Arrow Stream. See + // the [Apache Arrow streaming format]. + // + // When specifying `format=CSV` and `disposition=EXTERNAL_LINKS`, each chunk + // in the result will be a CSV according to [RFC 4180] standard. All the + // columns values will have *string representation* similar to the + // `JSON_ARRAY` format, and `null` values will be encoded as “null”. + // Only the first chunk in the result would contain a header row with column + // names. For example, the output of `SELECT concat('id-', id) AS strCol, id + // AS intCol, null as nullCol FROM range(3)` would look like this: + // + // ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ``` + // + // [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format + // [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180 + Format Format `json:"format,omitempty"` + // When `wait_timeout > 0s`, the call will block up to the specified time. + // If the statement execution doesn't finish within this time, + // `on_wait_timeout` determines whether the execution should continue or be + // canceled. When set to `CONTINUE`, the statement execution continues + // asynchronously and the call returns a statement ID which can be used for + // polling with :method:statementexecution/getStatement. When set to + // `CANCEL`, the statement execution is canceled and the call returns with a + // `CANCELED` state. + OnWaitTimeout ExecuteStatementRequestOnWaitTimeout `json:"on_wait_timeout,omitempty"` + // A list of parameters to pass into a SQL statement containing parameter + // markers. A parameter consists of a name, a value, and optionally a type. + // To represent a NULL value, the `value` field may be omitted or set to + // `null` explicitly. If the `type` field is omitted, the value is + // interpreted as a string. + // + // If the type is given, parameters will be checked for type correctness + // according to the given type. A value is correct if the provided string + // can be converted to the requested type using the `cast` function. 
The + // exact semantics are described in the section [`cast` function] of the SQL + // language reference. + // + // For example, the following statement contains two parameters, `my_name` + // and `my_date`: + // + // SELECT * FROM my_table WHERE name = :my_name AND date = :my_date + // + // The parameters can be passed in the request body as follows: + // + // { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND + // date = :my_date", "parameters": [ { "name": "my_name", "value": "the + // name" }, { "name": "my_date", "value": "2020-01-01", "type": "DATE" } ] } + // + // Currently, positional parameters denoted by a `?` marker are not + // supported by the Databricks SQL Statement Execution API. + // + // Also see the section [Parameter markers] of the SQL language reference. + // + // [Parameter markers]: https://docs.databricks.com/sql/language-manual/sql-ref-parameter-marker.html + // [`cast` function]: https://docs.databricks.com/sql/language-manual/functions/cast.html + Parameters []StatementParameterListItem `json:"parameters,omitempty"` + // Applies the given row limit to the statement's result set, but unlike the + // `LIMIT` clause in SQL, it also sets the `truncated` field in the response + // to indicate whether the result was trimmed due to the limit or not. + RowLimit int64 `json:"row_limit,omitempty"` + // Sets default schema for statement execution, similar to [`USE SCHEMA`] in + // SQL. + // + // [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html + Schema string `json:"schema,omitempty"` + // The SQL statement to execute. The statement can optionally be + // parameterized, see `parameters`. + Statement string `json:"statement"` + // The time in seconds the call will wait for the statement's result set as + // `Ns`, where `N` can be set to 0 or to a value between 5 and 50. + // + // When set to `0s`, the statement will execute in asynchronous mode and the + // call will not wait for the execution to finish. In this case, the call + // returns directly with `PENDING` state and a statement ID which can be + // used for polling with :method:statementexecution/getStatement. + // + // When set between 5 and 50 seconds, the call will behave synchronously up + // to this timeout and wait for the statement execution to finish. If the + // execution finishes within this time, the call returns immediately with a + // manifest and result data (or a `FAILED` state in case of an execution + // error). If the statement takes longer to execute, `on_wait_timeout` + // determines what should happen after the timeout is reached. + WaitTimeout string `json:"wait_timeout,omitempty"` + // Warehouse upon which to execute a statement. See also [What are SQL + // warehouses?] + // + // [What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html + WarehouseId string `json:"warehouse_id"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExecuteStatementRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExecuteStatementRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// When `wait_timeout > 0s`, the call will block up to the specified time. If +// the statement execution doesn't finish within this time, `on_wait_timeout` +// determines whether the execution should continue or be canceled. 
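For orientation, a minimal sketch of how these fields combine when executing a parameterized statement. It assumes the stable `databricks-sdk-go` workspace client, whose `service/sql` models mirror the preview types above; the warehouse ID is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Block up to 30 seconds; if the statement is still running after that,
	// continue asynchronously and return a statement ID for polling.
	resp, err := w.StatementExecution.ExecuteStatement(ctx, sql.ExecuteStatementRequest{
		WarehouseId: "<warehouse-id>", // placeholder
		Statement:   "SELECT * FROM my_table WHERE name = :my_name",
		Parameters: []sql.StatementParameterListItem{
			{Name: "my_name", Value: "the name"},
		},
		WaitTimeout:   "30s",
		OnWaitTimeout: sql.ExecuteStatementRequestOnWaitTimeoutContinue,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatementId)
}
```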
When set to
+// `CONTINUE`, the statement execution continues asynchronously and the call
+// returns a statement ID which can be used for polling with
+// :method:statementexecution/getStatement. When set to `CANCEL`, the statement
+// execution is canceled and the call returns with a `CANCELED` state.
+type ExecuteStatementRequestOnWaitTimeout string
+
+const ExecuteStatementRequestOnWaitTimeoutCancel ExecuteStatementRequestOnWaitTimeout = `CANCEL`
+
+const ExecuteStatementRequestOnWaitTimeoutContinue ExecuteStatementRequestOnWaitTimeout = `CONTINUE`
+
+// String representation for [fmt.Print]
+func (f *ExecuteStatementRequestOnWaitTimeout) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ExecuteStatementRequestOnWaitTimeout) Set(v string) error {
+ switch v {
+ case `CANCEL`, `CONTINUE`:
+ *f = ExecuteStatementRequestOnWaitTimeout(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CANCEL", "CONTINUE"`, v)
+ }
+}
+
+// Type always returns ExecuteStatementRequestOnWaitTimeout to satisfy [pflag.Value] interface
+func (f *ExecuteStatementRequestOnWaitTimeout) Type() string {
+ return "ExecuteStatementRequestOnWaitTimeout"
+}
+
+type ExternalLink struct {
+ // The number of bytes in the result chunk. This field is not available when
+ // using `INLINE` disposition.
+ ByteCount int64 `json:"byte_count,omitempty"`
+ // The position within the sequence of result set chunks.
+ ChunkIndex int `json:"chunk_index,omitempty"`
+ // Indicates the date-time that the given external link will expire and
+ // become invalid, after which point a new `external_link` must be
+ // requested.
+ Expiration string `json:"expiration,omitempty"`
+
+ ExternalLink string `json:"external_link,omitempty"`
+ // HTTP headers that must be included with a GET request to the
+ // `external_link`. Each header is provided as a key-value pair. Headers are
+ // typically used to pass a decryption key to the external service. The
+ // values of these headers should be considered sensitive and the client
+ // should not expose these values in a log.
+ HttpHeaders map[string]string `json:"http_headers,omitempty"`
+ // When fetching, provides the `chunk_index` for the _next_ chunk. If
+ // absent, indicates there are no more chunks. The next chunk can be fetched
+ // with a :method:statementexecution/getStatementResultChunkN request.
+ NextChunkIndex int `json:"next_chunk_index,omitempty"`
+ // When fetching, provides a link to fetch the _next_ chunk. If absent,
+ // indicates there are no more chunks. This link is an absolute `path` to be
+ // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque
+ // link. This is an alternative to using `next_chunk_index`.
+ NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"`
+ // The number of rows within the result chunk.
+ RowCount int64 `json:"row_count,omitempty"`
+ // The starting row offset within the result set.
+ RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalLink) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalLink) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Format string + +const FormatArrowStream Format = `ARROW_STREAM` + +const FormatCsv Format = `CSV` + +const FormatJsonArray Format = `JSON_ARRAY` + +// String representation for [fmt.Print] +func (f *Format) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Format) Set(v string) error { + switch v { + case `ARROW_STREAM`, `CSV`, `JSON_ARRAY`: + *f = Format(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARROW_STREAM", "CSV", "JSON_ARRAY"`, v) + } +} + +// Type always returns Format to satisfy [pflag.Value] interface +func (f *Format) Type() string { + return "Format" +} + +// Get an alert +type GetAlertRequest struct { + Id string `json:"-" url:"-"` +} + +// Get an alert +type GetAlertsLegacyRequest struct { + AlertId string `json:"-" url:"-"` +} + +// Retrieve a definition +type GetDashboardRequest struct { + DashboardId string `json:"-" url:"-"` +} + +// Get object ACL +type GetDbsqlPermissionRequest struct { + // Object ID. An ACL is returned for the object with this UUID. + ObjectId string `json:"-" url:"-"` + // The type of object permissions to check. + ObjectType ObjectTypePlural `json:"-" url:"-"` +} + +// Get a query definition. +type GetQueriesLegacyRequest struct { + QueryId string `json:"-" url:"-"` +} + +// Get a query +type GetQueryRequest struct { + Id string `json:"-" url:"-"` +} + +type GetResponse struct { + AccessControlList []AccessControl `json:"access_control_list,omitempty"` + // An object's type and UUID, separated by a forward slash (/) character. + ObjectId string `json:"object_id,omitempty"` + // A singular noun object type. + ObjectType ObjectType `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get status, manifest, and result first chunk +type GetStatementRequest struct { + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId string `json:"-" url:"-"` +} + +// Get result chunk by index +type GetStatementResultChunkNRequest struct { + ChunkIndex int `json:"-" url:"-"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId string `json:"-" url:"-"` +} + +// Get SQL warehouse permission levels +type GetWarehousePermissionLevelsRequest struct { + // The SQL warehouse for which to get or manage permissions. + WarehouseId string `json:"-" url:"-"` +} + +type GetWarehousePermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []WarehousePermissionsDescription `json:"permission_levels,omitempty"` +} + +// Get SQL warehouse permissions +type GetWarehousePermissionsRequest struct { + // The SQL warehouse for which to get or manage permissions. + WarehouseId string `json:"-" url:"-"` +} + +// Get warehouse info +type GetWarehouseRequest struct { + // Required. Id of the SQL warehouse. 
+ Id string `json:"-" url:"-"`
+}
+
+type GetWarehouseResponse struct {
+ // The amount of time in minutes that a SQL warehouse must be idle (i.e., no
+ // RUNNING queries) before it is automatically stopped.
+ //
+ // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
+ //
+ // Defaults to 120 mins
+ AutoStopMins int `json:"auto_stop_mins,omitempty"`
+ // Channel Details
+ Channel *Channel `json:"channel,omitempty"`
+ // Size of the clusters allocated for this warehouse. Increasing the size of
+ // a Spark cluster allows you to run larger queries on it. If you want to
+ // increase the number of concurrent queries, please tune max_num_clusters.
+ //
+ // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large
+ // - 2X-Large - 3X-Large - 4X-Large
+ ClusterSize string `json:"cluster_size,omitempty"`
+ // warehouse creator name
+ CreatorName string `json:"creator_name,omitempty"`
+ // Configures whether the warehouse should use Photon optimized clusters.
+ //
+ // Defaults to false.
+ EnablePhoton bool `json:"enable_photon,omitempty"`
+ // Configures whether the warehouse should use serverless compute
+ EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"`
+ // Optional health status. Assume the warehouse is healthy if this field is
+ // not set.
+ Health *EndpointHealth `json:"health,omitempty"`
+ // unique identifier for warehouse
+ Id string `json:"id,omitempty"`
+ // Deprecated. Instance profile used to pass IAM role to the cluster
+ InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
+ // the jdbc connection string for this warehouse
+ JdbcUrl string `json:"jdbc_url,omitempty"`
+ // Maximum number of clusters that the autoscaler will create to handle
+ // concurrent queries.
+ //
+ // Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ //
+ // Defaults to min_clusters if unset.
+ MaxNumClusters int `json:"max_num_clusters,omitempty"`
+ // Minimum number of available clusters that will be maintained for this SQL
+ // warehouse. Increasing this will ensure that a larger number of clusters
+ // are always running and therefore may reduce the cold start time for new
+ // queries. This is similar to reserved vs. revocable cores in a resource
+ // manager.
+ //
+ // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30)
+ //
+ // Defaults to 1
+ MinNumClusters int `json:"min_num_clusters,omitempty"`
+ // Logical name for the cluster.
+ //
+ // Supported values: - Must be unique within an org. - Must be less than 100
+ // characters.
+ Name string `json:"name,omitempty"`
+ // current number of active sessions for the warehouse
+ NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
+ // current number of clusters running for the service
+ NumClusters int `json:"num_clusters,omitempty"`
+ // ODBC parameters for the SQL warehouse
+ OdbcParams *OdbcParams `json:"odbc_params,omitempty"`
+ // Configures whether the warehouse should use spot instances.
+ SpotInstancePolicy SpotInstancePolicy `json:"spot_instance_policy,omitempty"`
+ // State of the warehouse
+ State State `json:"state,omitempty"`
+ // A set of key-value pairs that will be tagged on all resources (e.g., AWS
+ // instances and EBS volumes) associated with this SQL warehouse.
+ //
+ // Supported values: - Number of tags < 45.
+ Tags *EndpointTags `json:"tags,omitempty"`
+ // Warehouse type: `PRO` or `CLASSIC`.
If you want to use serverless
+ // compute, you must set it to `PRO` and also set the field
+ // `enable_serverless_compute` to `true`.
+ WarehouseType GetWarehouseResponseWarehouseType `json:"warehouse_type,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetWarehouseResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetWarehouseResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute,
+// you must set it to `PRO` and also set the field `enable_serverless_compute`
+// to `true`.
+type GetWarehouseResponseWarehouseType string
+
+const GetWarehouseResponseWarehouseTypeClassic GetWarehouseResponseWarehouseType = `CLASSIC`
+
+const GetWarehouseResponseWarehouseTypePro GetWarehouseResponseWarehouseType = `PRO`
+
+const GetWarehouseResponseWarehouseTypeTypeUnspecified GetWarehouseResponseWarehouseType = `TYPE_UNSPECIFIED`
+
+// String representation for [fmt.Print]
+func (f *GetWarehouseResponseWarehouseType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *GetWarehouseResponseWarehouseType) Set(v string) error {
+ switch v {
+ case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`:
+ *f = GetWarehouseResponseWarehouseType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v)
+ }
+}
+
+// Type always returns GetWarehouseResponseWarehouseType to satisfy [pflag.Value] interface
+func (f *GetWarehouseResponseWarehouseType) Type() string {
+ return "GetWarehouseResponseWarehouseType"
+}
+
+type GetWorkspaceWarehouseConfigResponse struct {
+ // Optional: Channel selection details
+ Channel *Channel `json:"channel,omitempty"`
+ // Deprecated: Use sql_configuration_parameters
+ ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"`
+ // Spark confs for external hive metastore configuration, JSON serialized.
+ // Size must be <= 512K
+ DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"`
+ // List of Warehouse Types allowed in this workspace (limits allowed value
+ // of the type field in CreateWarehouse and EditWarehouse). Note: Some types
+ // cannot be disabled; they don't need to be specified in
+ // SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing
+ // warehouses to be converted to another type. Used by frontend to save
+ // specific type availability in the warehouse create and edit form UI.
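Every generated enum in this file carries the same String/Set/Type trio, so it satisfies the [pflag.Value] interface and can be bound straight to a CLI flag. A sketch under that assumption; the flag name, the helper, and the `sqlpreview` package alias are made up, and `fmt` plus `github.com/spf13/pflag` are assumed imported:

```go
// demoWarehouseTypeFlag binds the enum to a flag; Set rejects anything
// outside CLASSIC, PRO, TYPE_UNSPECIFIED.
func demoWarehouseTypeFlag(args []string) error {
	var wt sqlpreview.GetWarehouseResponseWarehouseType
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Var(&wt, "warehouse-type", "CLASSIC, PRO or TYPE_UNSPECIFIED")
	if err := fs.Parse(args); err != nil {
		return err // carries the validation message built in Set
	}
	fmt.Println("selected:", wt.String())
	return nil
}
```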
+ EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"`
+ // Deprecated: Use sql_configuration_parameters
+ GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"`
+ // GCP only: Google Service Account passed to the cluster to access Google
+ // Cloud Storage
+ GoogleServiceAccount string `json:"google_service_account,omitempty"`
+ // AWS Only: Instance profile used to pass IAM role to the cluster
+ InstanceProfileArn string `json:"instance_profile_arn,omitempty"`
+ // Security policy for warehouses
+ SecurityPolicy GetWorkspaceWarehouseConfigResponseSecurityPolicy `json:"security_policy,omitempty"`
+ // SQL configuration parameters
+ SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *GetWorkspaceWarehouseConfigResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s GetWorkspaceWarehouseConfigResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// Security policy for warehouses
+type GetWorkspaceWarehouseConfigResponseSecurityPolicy string
+
+const GetWorkspaceWarehouseConfigResponseSecurityPolicyDataAccessControl GetWorkspaceWarehouseConfigResponseSecurityPolicy = `DATA_ACCESS_CONTROL`
+
+const GetWorkspaceWarehouseConfigResponseSecurityPolicyNone GetWorkspaceWarehouseConfigResponseSecurityPolicy = `NONE`
+
+const GetWorkspaceWarehouseConfigResponseSecurityPolicyPassthrough GetWorkspaceWarehouseConfigResponseSecurityPolicy = `PASSTHROUGH`
+
+// String representation for [fmt.Print]
+func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) Set(v string) error {
+ switch v {
+ case `DATA_ACCESS_CONTROL`, `NONE`, `PASSTHROUGH`:
+ *f = GetWorkspaceWarehouseConfigResponseSecurityPolicy(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "DATA_ACCESS_CONTROL", "NONE", "PASSTHROUGH"`, v)
+ }
+}
+
+// Type always returns GetWorkspaceWarehouseConfigResponseSecurityPolicy to satisfy [pflag.Value] interface
+func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) Type() string {
+ return "GetWorkspaceWarehouseConfigResponseSecurityPolicy"
+}
+
+type LegacyAlert struct {
+ // Timestamp when the alert was created.
+ CreatedAt string `json:"created_at,omitempty"`
+ // Alert ID.
+ Id string `json:"id,omitempty"`
+ // Timestamp when the alert was last triggered.
+ LastTriggeredAt string `json:"last_triggered_at,omitempty"`
+ // Name of the alert.
+ Name string `json:"name,omitempty"`
+ // Alert configuration options.
+ Options *AlertOptions `json:"options,omitempty"`
+ // The identifier of the workspace folder containing the object.
+ Parent string `json:"parent,omitempty"`
+
+ Query *AlertQuery `json:"query,omitempty"`
+ // Number of seconds after being triggered before the alert rearms itself
+ // and can be triggered again. If `null`, the alert will never be triggered
+ // again.
+ Rearm int `json:"rearm,omitempty"`
+ // State of the alert. Possible values are: `unknown` (yet to be evaluated),
+ // `triggered` (evaluated and fulfilled trigger conditions), or `ok`
+ // (evaluated and did not fulfill trigger conditions).
+ State LegacyAlertState `json:"state,omitempty"`
+ // Timestamp when the alert was last updated.
+ UpdatedAt string `json:"updated_at,omitempty"` + + User *User `json:"user,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *LegacyAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LegacyAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// State of the alert. Possible values are: `unknown` (yet to be evaluated), +// `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated +// and did not fulfill trigger conditions). +type LegacyAlertState string + +const LegacyAlertStateOk LegacyAlertState = `ok` + +const LegacyAlertStateTriggered LegacyAlertState = `triggered` + +const LegacyAlertStateUnknown LegacyAlertState = `unknown` + +// String representation for [fmt.Print] +func (f *LegacyAlertState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *LegacyAlertState) Set(v string) error { + switch v { + case `ok`, `triggered`, `unknown`: + *f = LegacyAlertState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ok", "triggered", "unknown"`, v) + } +} + +// Type always returns LegacyAlertState to satisfy [pflag.Value] interface +func (f *LegacyAlertState) Type() string { + return "LegacyAlertState" +} + +type LegacyQuery struct { + // Describes whether the authenticated user is allowed to edit the + // definition of this query. + CanEdit bool `json:"can_edit,omitempty"` + // The timestamp when this query was created. + CreatedAt string `json:"created_at,omitempty"` + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId string `json:"data_source_id,omitempty"` + // General description that conveys additional information about this query + // such as usage notes. + Description string `json:"description,omitempty"` + // Query ID. + Id string `json:"id,omitempty"` + // Indicates whether the query is trashed. Trashed queries can't be used in + // dashboards, or appear in search results. If this boolean is `true`, the + // `options` property for this query includes a `moved_to_trash_at` + // timestamp. Trashed queries are permanently deleted after 30 days. + IsArchived bool `json:"is_archived,omitempty"` + // Whether the query is a draft. Draft queries only appear in list views for + // their owners. Visualizations from draft queries cannot appear on + // dashboards. + IsDraft bool `json:"is_draft,omitempty"` + // Whether this query object appears in the current user's favorites list. + // This flag determines whether the star icon for favorites is selected. + IsFavorite bool `json:"is_favorite,omitempty"` + // Text parameter types are not safe from SQL injection for all types of + // data source. Set this Boolean parameter to `true` if a query either does + // not use any text type parameters or uses a data source type where text + // type parameters are handled safely. + IsSafe bool `json:"is_safe,omitempty"` + + LastModifiedBy *User `json:"last_modified_by,omitempty"` + // The ID of the user who last saved changes to this query. + LastModifiedById int `json:"last_modified_by_id,omitempty"` + // If there is a cached result for this query and user, this field includes + // the query result ID. If this query uses parameters, this field is always + // null. 
+ LatestQueryDataId string `json:"latest_query_data_id,omitempty"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name string `json:"name,omitempty"` + + Options *QueryOptions `json:"options,omitempty"` + // The identifier of the workspace folder containing the object. + Parent string `json:"parent,omitempty"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionTier PermissionLevel `json:"permission_tier,omitempty"` + // The text of the query to be run. + Query string `json:"query,omitempty"` + // A SHA-256 hash of the query text along with the authenticated user ID. + QueryHash string `json:"query_hash,omitempty"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `json:"run_as_role,omitempty"` + + Tags []string `json:"tags,omitempty"` + // The timestamp at which this query was last updated. + UpdatedAt string `json:"updated_at,omitempty"` + + User *User `json:"user,omitempty"` + // The ID of the user who owns the query. + UserId int `json:"user_id,omitempty"` + + Visualizations []LegacyVisualization `json:"visualizations,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *LegacyQuery) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LegacyQuery) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The visualization description API changes frequently and is unsupported. You +// can duplicate a visualization by copying description objects received _from +// the API_ and then using them to create a new one with a POST request to the +// same endpoint. Databricks does not recommend constructing ad-hoc +// visualizations entirely in JSON. +type LegacyVisualization struct { + CreatedAt string `json:"created_at,omitempty"` + // A short description of this visualization. This is not displayed in the + // UI. + Description string `json:"description,omitempty"` + // The UUID for this visualization. + Id string `json:"id,omitempty"` + // The name of the visualization that appears on dashboards and the query + // screen. + Name string `json:"name,omitempty"` + // The options object varies widely from one visualization type to the next + // and is unsupported. Databricks does not recommend modifying visualization + // settings in JSON. + Options any `json:"options,omitempty"` + + Query *LegacyQuery `json:"query,omitempty"` + // The type of visualization: chart, table, pivot table, and so on. 
+ Type string `json:"type,omitempty"`
+
+ UpdatedAt string `json:"updated_at,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *LegacyVisualization) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s LegacyVisualization) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type LifecycleState string
+
+const LifecycleStateActive LifecycleState = `ACTIVE`
+
+const LifecycleStateTrashed LifecycleState = `TRASHED`
+
+// String representation for [fmt.Print]
+func (f *LifecycleState) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *LifecycleState) Set(v string) error {
+ switch v {
+ case `ACTIVE`, `TRASHED`:
+ *f = LifecycleState(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ACTIVE", "TRASHED"`, v)
+ }
+}
+
+// Type always returns LifecycleState to satisfy [pflag.Value] interface
+func (f *LifecycleState) Type() string {
+ return "LifecycleState"
+}
+
+// List alerts
+type ListAlertsRequest struct {
+ PageSize int `json:"-" url:"page_size,omitempty"`
+
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListAlertsRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListAlertsRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListAlertsResponse struct {
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ Results []ListAlertsResponseAlert `json:"results,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListAlertsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListAlertsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListAlertsResponseAlert struct {
+ // Trigger conditions of the alert.
+ Condition *AlertCondition `json:"condition,omitempty"`
+ // The timestamp indicating when the alert was created.
+ CreateTime string `json:"create_time,omitempty"`
+ // Custom body of alert notification, if it exists. See [here] for custom
+ // templating instructions.
+ //
+ // [here]: https://docs.databricks.com/sql/user/alerts/index.html
+ CustomBody string `json:"custom_body,omitempty"`
+ // Custom subject of alert notification, if it exists. This can include
+ // email subject entries and Slack notification headers, for example. See
+ // [here] for custom templating instructions.
+ //
+ // [here]: https://docs.databricks.com/sql/user/alerts/index.html
+ CustomSubject string `json:"custom_subject,omitempty"`
+ // The display name of the alert.
+ DisplayName string `json:"display_name,omitempty"`
+ // UUID identifying the alert.
+ Id string `json:"id,omitempty"`
+ // The workspace state of the alert. Used for tracking trashed status.
+ LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
+ // Whether to notify alert subscribers when the alert returns to normal.
+ NotifyOnOk bool `json:"notify_on_ok,omitempty"`
+ // The owner's username. This field is set to "Unavailable" if the user has
+ // been deleted.
+ OwnerUserName string `json:"owner_user_name,omitempty"`
+ // UUID of the query attached to the alert.
+ QueryId string `json:"query_id,omitempty"`
+ // Number of seconds an alert must wait after being triggered to rearm
+ // itself. After rearming, it can be triggered again. If 0 or not specified,
+ // the alert will not be triggered again.
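`ListAlertsRequest` and `ListAlertsResponse` follow the usual token-pagination contract: pass `next_page_token` back as `page_token` until it comes back empty. A sketch of that loop, assuming `context` is imported; `listAlerts` is a hypothetical stand-in for whatever transport issues the GET:

```go
func collectAlerts(ctx context.Context,
	listAlerts func(context.Context, ListAlertsRequest) (*ListAlertsResponse, error),
) ([]ListAlertsResponseAlert, error) {
	var all []ListAlertsResponseAlert
	req := ListAlertsRequest{PageSize: 100}
	for {
		resp, err := listAlerts(ctx, req)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Results...)
		if resp.NextPageToken == "" {
			return all, nil // empty token: last page reached
		}
		req.PageToken = resp.NextPageToken
	}
}
```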
+ SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + // Current state of the alert's trigger status. This field is set to UNKNOWN + // if the alert has not yet been evaluated or ran into an error during the + // last evaluation. + State AlertState `json:"state,omitempty"` + // Timestamp when the alert was last triggered, if the alert has been + // triggered before. + TriggerTime string `json:"trigger_time,omitempty"` + // The timestamp indicating when the alert was updated. + UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListAlertsResponseAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAlertsResponseAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Get dashboard objects +type ListDashboardsRequest struct { + // Name of dashboard attribute to order by. + Order ListOrder `json:"-" url:"order,omitempty"` + // Page number to retrieve. + Page int `json:"-" url:"page,omitempty"` + // Number of dashboards to return per page. + PageSize int `json:"-" url:"page_size,omitempty"` + // Full text search term. + Q string `json:"-" url:"q,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListDashboardsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListOrder string + +const ListOrderCreatedAt ListOrder = `created_at` + +const ListOrderName ListOrder = `name` + +// String representation for [fmt.Print] +func (f *ListOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListOrder) Set(v string) error { + switch v { + case `created_at`, `name`: + *f = ListOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "created_at", "name"`, v) + } +} + +// Type always returns ListOrder to satisfy [pflag.Value] interface +func (f *ListOrder) Type() string { + return "ListOrder" +} + +// Get a list of queries +type ListQueriesLegacyRequest struct { + // Name of query attribute to order by. Default sort order is ascending. + // Append a dash (`-`) to order descending instead. + // + // - `name`: The name of the query. + // + // - `created_at`: The timestamp the query was created. + // + // - `runtime`: The time it took to run this query. This is blank for + // parameterized queries. A blank value is treated as the highest value for + // sorting. + // + // - `executed_at`: The timestamp when the query was last run. + // + // - `created_by`: The user name of the user that created the query. + Order string `json:"-" url:"order,omitempty"` + // Page number to retrieve. + Page int `json:"-" url:"page,omitempty"` + // Number of queries to return per page. 
+ PageSize int `json:"-" url:"page_size,omitempty"`
+ // Full text search term
+ Q string `json:"-" url:"q,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListQueriesLegacyRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListQueriesLegacyRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List queries
+type ListQueriesRequest struct {
+ PageSize int `json:"-" url:"page_size,omitempty"`
+
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListQueriesRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListQueriesRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListQueriesResponse struct {
+ // Whether there is another page of results.
+ HasNextPage bool `json:"has_next_page,omitempty"`
+ // A token that can be used to get the next page of results.
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ Res []QueryInfo `json:"res,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListQueriesResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListQueriesResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+// List Queries
+type ListQueryHistoryRequest struct {
+ // A filter to limit query history results. This field is optional.
+ FilterBy *QueryFilter `json:"-" url:"filter_by,omitempty"`
+ // Whether to include the query metrics with each query. Only use this for a
+ // small subset of queries (max_results). Defaults to false.
+ IncludeMetrics bool `json:"-" url:"include_metrics,omitempty"`
+ // Limit the number of results returned in one page. Must be less than 1000
+ // and the default is 100.
+ MaxResults int `json:"-" url:"max_results,omitempty"`
+ // A token that can be used to get the next page of results. The token can
+ // contain characters that need to be encoded before using it in a URL. For
+ // example, the character '+' needs to be replaced by %2B. This field is
+ // optional.
+ PageToken string `json:"-" url:"page_token,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListQueryHistoryRequest) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListQueryHistoryRequest) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListQueryObjectsResponse struct {
+ NextPageToken string `json:"next_page_token,omitempty"`
+
+ Results []ListQueryObjectsResponseQuery `json:"results,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ListQueryObjectsResponse) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s ListQueryObjectsResponse) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type ListQueryObjectsResponseQuery struct {
+ // Whether to apply a 1000 row limit to the query result.
+ ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"`
+ // Name of the catalog where this query will be executed.
+ Catalog string `json:"catalog,omitempty"`
+ // Timestamp when this query was created.
+ CreateTime string `json:"create_time,omitempty"`
+ // General description that conveys additional information about this query
+ // such as usage notes.
+ Description string `json:"description,omitempty"`
+ // Display name of the query that appears in list views, widget headings,
+ // and on the query page.
+ DisplayName string `json:"display_name,omitempty"`
+ // UUID identifying the query.
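The `page_token` caveat above (a `+` must become `%2B`) is ordinary URL query escaping, which Go's `net/url` already performs. A sketch; the token value is hypothetical and the endpoint path is shown only for context:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	token := "abc+def/123=" // hypothetical token with reserved characters
	q := url.Values{}
	q.Set("page_token", token)
	q.Set("max_results", "100")
	// url.Values.Encode escapes '+' as %2B, '/' as %2F, '=' as %3D.
	fmt.Println("/api/2.0/sql/history/queries?" + q.Encode())
}
```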
+ Id string `json:"id,omitempty"` + // Username of the user who last saved changes to this query. + LastModifierUserName string `json:"last_modifier_user_name,omitempty"` + // Indicates whether the query is trashed. + LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` + // Username of the user that owns the query. + OwnerUserName string `json:"owner_user_name,omitempty"` + // List of query parameter definitions. + Parameters []QueryParameter `json:"parameters,omitempty"` + // Text of the query to be run. + QueryText string `json:"query_text,omitempty"` + // Sets the "Run as" role for the object. + RunAsMode RunAsMode `json:"run_as_mode,omitempty"` + // Name of the schema where this query will be executed. + Schema string `json:"schema,omitempty"` + + Tags []string `json:"tags,omitempty"` + // Timestamp when this query was last updated. + UpdateTime string `json:"update_time,omitempty"` + // ID of the SQL warehouse attached to the query. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListQueryObjectsResponseQuery) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListQueryObjectsResponseQuery) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListResponse struct { + // The total number of dashboards. + Count int `json:"count,omitempty"` + // The current page being displayed. + Page int `json:"page,omitempty"` + // The number of dashboards per page. + PageSize int `json:"page_size,omitempty"` + // List of dashboards returned. + Results []Dashboard `json:"results,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List visualizations on a query +type ListVisualizationsForQueryRequest struct { + Id string `json:"-" url:"-"` + + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListVisualizationsForQueryRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListVisualizationsForQueryRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListVisualizationsForQueryResponse struct { + NextPageToken string `json:"next_page_token,omitempty"` + + Results []Visualization `json:"results,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListVisualizationsForQueryResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListVisualizationsForQueryResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List warehouses +type ListWarehousesRequest struct { + // Service Principal which will be used to fetch the list of warehouses. If + // not specified, the user from the session header is used. + RunAsUserId int `json:"-" url:"run_as_user_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListWarehousesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListWarehousesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListWarehousesResponse struct { + // A list of warehouses and their configurations. + Warehouses []EndpointInfo `json:"warehouses,omitempty"` +} + +type MultiValuesOptions struct { + // Character that prefixes each selected parameter value. 
+ Prefix string `json:"prefix,omitempty"` + // Character that separates each selected parameter value. Defaults to a + // comma. + Separator string `json:"separator,omitempty"` + // Character that suffixes each selected parameter value. + Suffix string `json:"suffix,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MultiValuesOptions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MultiValuesOptions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type NumericValue struct { + Value float64 `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *NumericValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NumericValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A singular noun object type. +type ObjectType string + +const ObjectTypeAlert ObjectType = `alert` + +const ObjectTypeDashboard ObjectType = `dashboard` + +const ObjectTypeDataSource ObjectType = `data_source` + +const ObjectTypeQuery ObjectType = `query` + +// String representation for [fmt.Print] +func (f *ObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ObjectType) Set(v string) error { + switch v { + case `alert`, `dashboard`, `data_source`, `query`: + *f = ObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alert", "dashboard", "data_source", "query"`, v) + } +} + +// Type always returns ObjectType to satisfy [pflag.Value] interface +func (f *ObjectType) Type() string { + return "ObjectType" +} + +// Always a plural of the object type. +type ObjectTypePlural string + +const ObjectTypePluralAlerts ObjectTypePlural = `alerts` + +const ObjectTypePluralDashboards ObjectTypePlural = `dashboards` + +const ObjectTypePluralDataSources ObjectTypePlural = `data_sources` + +const ObjectTypePluralQueries ObjectTypePlural = `queries` + +// String representation for [fmt.Print] +func (f *ObjectTypePlural) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ObjectTypePlural) Set(v string) error { + switch v { + case `alerts`, `dashboards`, `data_sources`, `queries`: + *f = ObjectTypePlural(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alerts", "dashboards", "data_sources", "queries"`, v) + } +} + +// Type always returns ObjectTypePlural to satisfy [pflag.Value] interface +func (f *ObjectTypePlural) Type() string { + return "ObjectTypePlural" +} + +type OdbcParams struct { + Hostname string `json:"hostname,omitempty"` + + Path string `json:"path,omitempty"` + + Port int `json:"port,omitempty"` + + Protocol string `json:"protocol,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OdbcParams) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OdbcParams) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The singular form of the type of object which can be owned. 
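To make `prefix`/`separator`/`suffix` concrete, here is an assumed rendering of a multi-value selection (the real substitution happens server-side); `strings` is assumed imported and the helper is hypothetical:

```go
// renderMultiValues wraps each selected value in prefix/suffix and joins
// them with the separator (comma when unset, per the field docs).
func renderMultiValues(opts MultiValuesOptions, values []string) string {
	sep := opts.Separator
	if sep == "" {
		sep = ","
	}
	parts := make([]string, len(values))
	for i, v := range values {
		parts[i] = opts.Prefix + v + opts.Suffix
	}
	return strings.Join(parts, sep)
}

// renderMultiValues(MultiValuesOptions{Prefix: "'", Suffix: "'"}, []string{"a", "b"})
// yields: 'a','b'
```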
+type OwnableObjectType string + +const OwnableObjectTypeAlert OwnableObjectType = `alert` + +const OwnableObjectTypeDashboard OwnableObjectType = `dashboard` + +const OwnableObjectTypeQuery OwnableObjectType = `query` + +// String representation for [fmt.Print] +func (f *OwnableObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *OwnableObjectType) Set(v string) error { + switch v { + case `alert`, `dashboard`, `query`: + *f = OwnableObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alert", "dashboard", "query"`, v) + } +} + +// Type always returns OwnableObjectType to satisfy [pflag.Value] interface +func (f *OwnableObjectType) Type() string { + return "OwnableObjectType" +} + +type Parameter struct { + // List of valid parameter values, newline delimited. Only applies for + // dropdown list parameters. + EnumOptions string `json:"enumOptions,omitempty"` + // If specified, allows multiple values to be selected for this parameter. + // Only applies to dropdown list and query-based dropdown list parameters. + MultiValuesOptions *MultiValuesOptions `json:"multiValuesOptions,omitempty"` + // The literal parameter marker that appears between double curly braces in + // the query text. + Name string `json:"name,omitempty"` + // The UUID of the query that provides the parameter values. Only applies + // for query-based dropdown list parameters. + QueryId string `json:"queryId,omitempty"` + // The text displayed in a parameter picking widget. + Title string `json:"title,omitempty"` + // Parameters can have several different types. + Type ParameterType `json:"type,omitempty"` + // The default value for this parameter. + Value any `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Parameter) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Parameter) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Parameters can have several different types. 
+type ParameterType string + +const ParameterTypeDatetime ParameterType = `datetime` + +const ParameterTypeEnum ParameterType = `enum` + +const ParameterTypeNumber ParameterType = `number` + +const ParameterTypeQuery ParameterType = `query` + +const ParameterTypeText ParameterType = `text` + +// String representation for [fmt.Print] +func (f *ParameterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ParameterType) Set(v string) error { + switch v { + case `datetime`, `enum`, `number`, `query`, `text`: + *f = ParameterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "datetime", "enum", "number", "query", "text"`, v) + } +} + +// Type always returns ParameterType to satisfy [pflag.Value] interface +func (f *ParameterType) Type() string { + return "ParameterType" +} + +// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * `CAN_EDIT`: +// Can edit the query * `CAN_MANAGE`: Can manage the query +type PermissionLevel string + +// Can edit the query +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +// Can manage the query +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +// Can run the query +const PermissionLevelCanRun PermissionLevel = `CAN_RUN` + +// Can view the query +const PermissionLevelCanView PermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_RUN`, `CAN_VIEW`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_RUN", "CAN_VIEW"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +// Possible Reasons for which we have not saved plans in the database +type PlansState string + +const PlansStateEmpty PlansState = `EMPTY` + +const PlansStateExists PlansState = `EXISTS` + +const PlansStateIgnoredLargePlansSize PlansState = `IGNORED_LARGE_PLANS_SIZE` + +const PlansStateIgnoredSmallDuration PlansState = `IGNORED_SMALL_DURATION` + +const PlansStateIgnoredSparkPlanType PlansState = `IGNORED_SPARK_PLAN_TYPE` + +const PlansStateUnknown PlansState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *PlansState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PlansState) Set(v string) error { + switch v { + case `EMPTY`, `EXISTS`, `IGNORED_LARGE_PLANS_SIZE`, `IGNORED_SMALL_DURATION`, `IGNORED_SPARK_PLAN_TYPE`, `UNKNOWN`: + *f = PlansState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMPTY", "EXISTS", "IGNORED_LARGE_PLANS_SIZE", "IGNORED_SMALL_DURATION", "IGNORED_SPARK_PLAN_TYPE", "UNKNOWN"`, v) + } +} + +// Type always returns PlansState to satisfy [pflag.Value] interface +func (f *PlansState) Type() string { + return "PlansState" +} + +type Query struct { + // Whether to apply a 1000 row limit to the query result. + ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"` + // Name of the catalog where this query will be executed. + Catalog string `json:"catalog,omitempty"` + // Timestamp when this query was created. 
+ CreateTime string `json:"create_time,omitempty"`
+ // General description that conveys additional information about this query
+ // such as usage notes.
+ Description string `json:"description,omitempty"`
+ // Display name of the query that appears in list views, widget headings,
+ // and on the query page.
+ DisplayName string `json:"display_name,omitempty"`
+ // UUID identifying the query.
+ Id string `json:"id,omitempty"`
+ // Username of the user who last saved changes to this query.
+ LastModifierUserName string `json:"last_modifier_user_name,omitempty"`
+ // Indicates whether the query is trashed.
+ LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
+ // Username of the user that owns the query.
+ OwnerUserName string `json:"owner_user_name,omitempty"`
+ // List of query parameter definitions.
+ Parameters []QueryParameter `json:"parameters,omitempty"`
+ // Workspace path of the workspace folder containing the object.
+ ParentPath string `json:"parent_path,omitempty"`
+ // Text of the query to be run.
+ QueryText string `json:"query_text,omitempty"`
+ // Sets the "Run as" role for the object.
+ RunAsMode RunAsMode `json:"run_as_mode,omitempty"`
+ // Name of the schema where this query will be executed.
+ Schema string `json:"schema,omitempty"`
+
+ Tags []string `json:"tags,omitempty"`
+ // Timestamp when this query was last updated.
+ UpdateTime string `json:"update_time,omitempty"`
+ // ID of the SQL warehouse attached to the query.
+ WarehouseId string `json:"warehouse_id,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Query) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s Query) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type QueryBackedValue struct {
+ // If specified, allows multiple values to be selected for this parameter.
+ MultiValuesOptions *MultiValuesOptions `json:"multi_values_options,omitempty"`
+ // UUID of the query that provides the parameter values.
+ QueryId string `json:"query_id,omitempty"`
+ // List of selected query parameter values.
+ Values []string `json:"values,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *QueryBackedValue) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s QueryBackedValue) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type QueryEditContent struct {
+ // Data source ID maps to the ID of the data source used by the resource and
+ // is distinct from the warehouse ID. [Learn more]
+ //
+ // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list
+ DataSourceId string `json:"data_source_id,omitempty"`
+ // General description that conveys additional information about this query
+ // such as usage notes.
+ Description string `json:"description,omitempty"`
+ // The title of this query that appears in list views, widget headings, and
+ // on the query page.
+ Name string `json:"name,omitempty"`
+ // Exclusively used for storing a list of parameter definitions. A parameter
+ // is an object with `title`, `name`, `type`, and `value` properties. The
+ // `value` field here is the default value. It can be overridden at runtime.
+ Options any `json:"options,omitempty"`
+ // The text of the query to be run.
+ Query string `json:"query,omitempty"`
+
+ QueryId string `json:"-" url:"-"`
+ // Sets the **Run as** role for the object.
Must be set to one of `"viewer"`
+ // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as
+ // owner" behavior)
+ RunAsRole RunAsRole `json:"run_as_role,omitempty"`
+
+ Tags []string `json:"tags,omitempty"`
+
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *QueryEditContent) UnmarshalJSON(b []byte) error {
+ return marshal.Unmarshal(b, s)
+}
+
+func (s QueryEditContent) MarshalJSON() ([]byte, error) {
+ return marshal.Marshal(s)
+}
+
+type QueryFilter struct {
+ // A range filter for query submitted time. The time range must be <= 30
+ // days.
+ QueryStartTimeRange *TimeRange `json:"query_start_time_range,omitempty" url:"query_start_time_range,omitempty"`
+ // A list of statement IDs.
+ StatementIds []string `json:"statement_ids,omitempty" url:"statement_ids,omitempty"`
+
+ Statuses []QueryStatus `json:"statuses,omitempty" url:"statuses,omitempty"`
+ // A list of user IDs who ran the queries.
+ UserIds []int64 `json:"user_ids,omitempty" url:"user_ids,omitempty"`
+ // A list of warehouse IDs.
+ WarehouseIds []string `json:"warehouse_ids,omitempty" url:"warehouse_ids,omitempty"`
+}
+
+type QueryInfo struct {
+ // SQL Warehouse channel information at the time of query execution
+ ChannelUsed *ChannelInfo `json:"channel_used,omitempty"`
+ // Total execution time of the statement (excluding result fetch time).
+ Duration int64 `json:"duration,omitempty"`
+ // Alias for `warehouse_id`.
+ EndpointId string `json:"endpoint_id,omitempty"`
+ // Message describing why the query could not complete.
+ ErrorMessage string `json:"error_message,omitempty"`
+ // The ID of the user whose credentials were used to run the query.
+ ExecutedAsUserId int64 `json:"executed_as_user_id,omitempty"`
+ // The email address or username of the user whose credentials were used to
+ // run the query.
+ ExecutedAsUserName string `json:"executed_as_user_name,omitempty"`
+ // The time execution of the query ended.
+ ExecutionEndTimeMs int64 `json:"execution_end_time_ms,omitempty"`
+ // Whether more updates for the query are expected.
+ IsFinal bool `json:"is_final,omitempty"`
+ // A key that can be used to look up query details.
+ LookupKey string `json:"lookup_key,omitempty"`
+ // Metrics about query execution.
+ Metrics *QueryMetrics `json:"metrics,omitempty"`
+ // Whether plans exist for the execution, or the reason why they are missing
+ PlansState PlansState `json:"plans_state,omitempty"`
+ // The time the query ended.
+ QueryEndTimeMs int64 `json:"query_end_time_ms,omitempty"`
+ // The query ID.
+ QueryId string `json:"query_id,omitempty"`
+ // The time the query started.
+ QueryStartTimeMs int64 `json:"query_start_time_ms,omitempty"`
+ // The text of the query.
+ QueryText string `json:"query_text,omitempty"`
+ // The number of results returned by the query.
+ RowsProduced int64 `json:"rows_produced,omitempty"`
+ // URL to the Spark UI query plan.
+ SparkUiUrl string `json:"spark_ui_url,omitempty"`
+ // Type of statement for this query
+ StatementType QueryStatementType `json:"statement_type,omitempty"`
+ // Query status with one of the following values:
+ //
+ // - `QUEUED`: Query has been received and queued. - `RUNNING`: Query has
+ // started. - `CANCELED`: Query has been cancelled by the user. - `FAILED`:
+ // Query has failed. - `FINISHED`: Query has completed.
+ Status QueryStatus `json:"status,omitempty"`
+ // The ID of the user who ran the query.
+ UserId int64 `json:"user_id,omitempty"`
+ // The email address or username of the user who ran the query.
+ UserName string `json:"user_name,omitempty"` + // Warehouse ID. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryList struct { + // The total number of queries. + Count int `json:"count,omitempty"` + // The page number that is currently displayed. + Page int `json:"page,omitempty"` + // The number of queries per page. + PageSize int `json:"page_size,omitempty"` + // List of queries returned. + Results []LegacyQuery `json:"results,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryList) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryList) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A query metric that encapsulates a set of measurements for a single query. +// Metrics come from the driver and are stored in the history service database. +type QueryMetrics struct { + // Time spent loading metadata and optimizing the query, in milliseconds. + CompilationTimeMs int64 `json:"compilation_time_ms,omitempty"` + // Time spent executing the query, in milliseconds. + ExecutionTimeMs int64 `json:"execution_time_ms,omitempty"` + // Total amount of data sent over the network between executor nodes during + // shuffle, in bytes. + NetworkSentBytes int64 `json:"network_sent_bytes,omitempty"` + // Timestamp of when the query was enqueued waiting while the warehouse was + // at max load. This field is optional and will not appear if the query + // skipped the overloading queue. + OverloadingQueueStartTimestamp int64 `json:"overloading_queue_start_timestamp,omitempty"` + // Total execution time for all individual Photon query engine tasks in the + // query, in milliseconds. + PhotonTotalTimeMs int64 `json:"photon_total_time_ms,omitempty"` + // Timestamp of when the query was enqueued waiting for a cluster to be + // provisioned for the warehouse. This field is optional and will not appear + // if the query skipped the provisioning queue. + ProvisioningQueueStartTimestamp int64 `json:"provisioning_queue_start_timestamp,omitempty"` + // Total number of bytes in all tables not read due to pruning + PrunedBytes int64 `json:"pruned_bytes,omitempty"` + // Total number of files from all tables not read due to pruning + PrunedFilesCount int64 `json:"pruned_files_count,omitempty"` + // Timestamp of when the underlying compute started compilation of the + // query. + QueryCompilationStartTimestamp int64 `json:"query_compilation_start_timestamp,omitempty"` + // Total size of data read by the query, in bytes. + ReadBytes int64 `json:"read_bytes,omitempty"` + // Size of persistent data read from the cache, in bytes. + ReadCacheBytes int64 `json:"read_cache_bytes,omitempty"` + // Number of files read after pruning + ReadFilesCount int64 `json:"read_files_count,omitempty"` + // Number of partitions read after pruning. + ReadPartitionsCount int64 `json:"read_partitions_count,omitempty"` + // Size of persistent data read from cloud object storage on your cloud + // tenant, in bytes. + ReadRemoteBytes int64 `json:"read_remote_bytes,omitempty"` + // Time spent fetching the query results after the execution finished, in + // milliseconds. + ResultFetchTimeMs int64 `json:"result_fetch_time_ms,omitempty"` + // `true` if the query result was fetched from cache, `false` otherwise. 
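Since compile, execute, and fetch durations are reported separately from the client-visible total, the remainder approximates queueing and other overhead. A small sketch; treating the fields this way is an assumption, not a documented identity:

```go
// queueAndOverheadMs estimates time spent outside compilation, execution,
// and result fetch; phases can overlap, so small negatives are possible.
func queueAndOverheadMs(m QueryMetrics) int64 {
	return m.TotalTimeMs - m.CompilationTimeMs - m.ExecutionTimeMs - m.ResultFetchTimeMs
}
```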
+ ResultFromCache bool `json:"result_from_cache,omitempty"` + // Total number of rows returned by the query. + RowsProducedCount int64 `json:"rows_produced_count,omitempty"` + // Total number of rows read by the query. + RowsReadCount int64 `json:"rows_read_count,omitempty"` + // Size of data temporarily written to disk while executing the query, in + // bytes. + SpillToDiskBytes int64 `json:"spill_to_disk_bytes,omitempty"` + // Sum of execution time for all of the query’s tasks, in milliseconds. + TaskTotalTimeMs int64 `json:"task_total_time_ms,omitempty"` + // Total execution time of the query from the client’s point of view, in + // milliseconds. + TotalTimeMs int64 `json:"total_time_ms,omitempty"` + // Size of persistent data written to cloud object storage in your cloud + // tenant, in bytes. + WriteRemoteBytes int64 `json:"write_remote_bytes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryMetrics) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryMetrics) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryOptions struct { + // The name of the catalog to execute this query in. + Catalog string `json:"catalog,omitempty"` + // The timestamp when this query was moved to trash. Only present when the + // `is_archived` property is `true`. Trashed items are deleted after thirty + // days. + MovedToTrashAt string `json:"moved_to_trash_at,omitempty"` + + Parameters []Parameter `json:"parameters,omitempty"` + // The name of the schema to execute this query in. + Schema string `json:"schema,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryOptions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryOptions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryParameter struct { + // Date-range query parameter value. Can only specify one of + // `dynamic_date_range_value` or `date_range_value`. + DateRangeValue *DateRangeValue `json:"date_range_value,omitempty"` + // Date query parameter value. Can only specify one of `dynamic_date_value` + // or `date_value`. + DateValue *DateValue `json:"date_value,omitempty"` + // Dropdown query parameter value. + EnumValue *EnumValue `json:"enum_value,omitempty"` + // Literal parameter marker that appears between double curly braces in the + // query text. + Name string `json:"name,omitempty"` + // Numeric query parameter value. + NumericValue *NumericValue `json:"numeric_value,omitempty"` + // Query-based dropdown query parameter value. + QueryBackedValue *QueryBackedValue `json:"query_backed_value,omitempty"` + // Text query parameter value. + TextValue *TextValue `json:"text_value,omitempty"` + // Text displayed in the user-facing parameter widget in the UI. + Title string `json:"title,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryParameter) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryParameter) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryPostContent struct { + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId string `json:"data_source_id,omitempty"` + // General description that conveys additional information about this query + // such as usage notes.
+ Description string `json:"description,omitempty"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name string `json:"name,omitempty"` + // Exclusively used for storing a list of parameter definitions. A parameter is + // an object with `title`, `name`, `type`, and `value` properties. The + // `value` field here is the default value. It can be overridden at runtime. + Options any `json:"options,omitempty"` + // The identifier of the workspace folder containing the object. + Parent string `json:"parent,omitempty"` + // The text of the query to be run. + Query string `json:"query,omitempty"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `json:"run_as_role,omitempty"` + + Tags []string `json:"tags,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryPostContent) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryPostContent) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryStatementType string + +const QueryStatementTypeAlter QueryStatementType = `ALTER` + +const QueryStatementTypeAnalyze QueryStatementType = `ANALYZE` + +const QueryStatementTypeCopy QueryStatementType = `COPY` + +const QueryStatementTypeCreate QueryStatementType = `CREATE` + +const QueryStatementTypeDelete QueryStatementType = `DELETE` + +const QueryStatementTypeDescribe QueryStatementType = `DESCRIBE` + +const QueryStatementTypeDrop QueryStatementType = `DROP` + +const QueryStatementTypeExplain QueryStatementType = `EXPLAIN` + +const QueryStatementTypeGrant QueryStatementType = `GRANT` + +const QueryStatementTypeInsert QueryStatementType = `INSERT` + +const QueryStatementTypeMerge QueryStatementType = `MERGE` + +const QueryStatementTypeOptimize QueryStatementType = `OPTIMIZE` + +const QueryStatementTypeOther QueryStatementType = `OTHER` + +const QueryStatementTypeRefresh QueryStatementType = `REFRESH` + +const QueryStatementTypeReplace QueryStatementType = `REPLACE` + +const QueryStatementTypeRevoke QueryStatementType = `REVOKE` + +const QueryStatementTypeSelect QueryStatementType = `SELECT` + +const QueryStatementTypeSet QueryStatementType = `SET` + +const QueryStatementTypeShow QueryStatementType = `SHOW` + +const QueryStatementTypeTruncate QueryStatementType = `TRUNCATE` + +const QueryStatementTypeUpdate QueryStatementType = `UPDATE` + +const QueryStatementTypeUse QueryStatementType = `USE` + +// String representation for [fmt.Print] +func (f *QueryStatementType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryStatementType) Set(v string) error { + switch v { + case `ALTER`, `ANALYZE`, `COPY`, `CREATE`, `DELETE`, `DESCRIBE`, `DROP`, `EXPLAIN`, `GRANT`, `INSERT`, `MERGE`, `OPTIMIZE`, `OTHER`, `REFRESH`, `REPLACE`, `REVOKE`, `SELECT`, `SET`, `SHOW`, `TRUNCATE`, `UPDATE`, `USE`: + *f = QueryStatementType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALTER", "ANALYZE", "COPY", "CREATE", "DELETE", "DESCRIBE", "DROP", "EXPLAIN", "GRANT", "INSERT", "MERGE", "OPTIMIZE", "OTHER", "REFRESH", "REPLACE", "REVOKE", "SELECT", "SET", "SHOW", "TRUNCATE", "UPDATE", "USE"`, v) + } +} + +// Type always returns QueryStatementType to satisfy [pflag.Value] interface +func (f *QueryStatementType) Type() string { + return "QueryStatementType" +} + +// Statuses
which are also used by OperationStatus in runtime +type QueryStatus string + +const QueryStatusCanceled QueryStatus = `CANCELED` + +const QueryStatusCompiled QueryStatus = `COMPILED` + +const QueryStatusCompiling QueryStatus = `COMPILING` + +const QueryStatusFailed QueryStatus = `FAILED` + +const QueryStatusFinished QueryStatus = `FINISHED` + +const QueryStatusQueued QueryStatus = `QUEUED` + +const QueryStatusRunning QueryStatus = `RUNNING` + +const QueryStatusStarted QueryStatus = `STARTED` + +// String representation for [fmt.Print] +func (f *QueryStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryStatus) Set(v string) error { + switch v { + case `CANCELED`, `COMPILED`, `COMPILING`, `FAILED`, `FINISHED`, `QUEUED`, `RUNNING`, `STARTED`: + *f = QueryStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "COMPILED", "COMPILING", "FAILED", "FINISHED", "QUEUED", "RUNNING", "STARTED"`, v) + } +} + +// Type always returns QueryStatus to satisfy [pflag.Value] interface +func (f *QueryStatus) Type() string { + return "QueryStatus" +} + +type RepeatedEndpointConfPairs struct { + // Deprecated: Use configuration_pairs + ConfigPair []EndpointConfPair `json:"config_pair,omitempty"` + + ConfigurationPairs []EndpointConfPair `json:"configuration_pairs,omitempty"` +} + +// Restore a dashboard +type RestoreDashboardRequest struct { + DashboardId string `json:"-" url:"-"` +} + +// Restore a query +type RestoreQueriesLegacyRequest struct { + QueryId string `json:"-" url:"-"` +} + +type RestoreResponse struct { +} + +type ResultData struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount int64 `json:"byte_count,omitempty"` + // The position within the sequence of result set chunks. + ChunkIndex int `json:"chunk_index,omitempty"` + // The `JSON_ARRAY` format is an array of arrays of values, where each + // non-null value is formatted as a string. Null values are encoded as JSON + // `null`. + DataArray [][]string `json:"data_array,omitempty"` + + ExternalLinks []ExternalLink `json:"external_links,omitempty"` + // When fetching, provides the `chunk_index` for the _next_ chunk. If + // absent, indicates there are no more chunks. The next chunk can be fetched + // with a :method:statementexecution/getStatementResultChunkN request. + NextChunkIndex int `json:"next_chunk_index,omitempty"` + // When fetching, provides a link to fetch the _next_ chunk. If absent, + // indicates there are no more chunks. This link is an absolute `path` to be + // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque + // link. This is an alternative to using `next_chunk_index`. + NextChunkInternalLink string `json:"next_chunk_internal_link,omitempty"` + // The number of rows within the result chunk. + RowCount int64 `json:"row_count,omitempty"` + // The starting row offset within the result set. + RowOffset int64 `json:"row_offset,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultData) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultData) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The result manifest provides schema and metadata for the result set. +type ResultManifest struct { + // Array of result set chunk metadata. 
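+ // An illustrative fetch loop over these chunks (editor's sketch, not part + // of the generated code: method and request names are assumed from + // :method:statementexecution/getStatementResultChunkN, `consume` is a + // stand-in, error handling omitted): + // + // for idx := 0; ; { + // d, _ := w.StatementExecution.GetStatementResultChunkN(ctx, GetStatementResultChunkNRequest{StatementId: id, ChunkIndex: idx}) + // consume(d.DataArray) + // if d.NextChunkIndex == 0 { break } + // idx = d.NextChunkIndex + // }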
+ Chunks []BaseChunkInfo `json:"chunks,omitempty"` + + Format Format `json:"format,omitempty"` + // The schema is an ordered list of column descriptions. + Schema *ResultSchema `json:"schema,omitempty"` + // The total number of bytes in the result set. This field is not available + // when using `INLINE` disposition. + TotalByteCount int64 `json:"total_byte_count,omitempty"` + // The total number of chunks that the result set has been divided into. + TotalChunkCount int `json:"total_chunk_count,omitempty"` + // The total number of rows in the result set. + TotalRowCount int64 `json:"total_row_count,omitempty"` + // Indicates whether the result is truncated due to `row_limit` or + // `byte_limit`. + Truncated bool `json:"truncated,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultManifest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultManifest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The schema is an ordered list of column descriptions. +type ResultSchema struct { + ColumnCount int `json:"column_count,omitempty"` + + Columns []ColumnInfo `json:"columns,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResultSchema) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResultSchema) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RunAsMode string + +const RunAsModeOwner RunAsMode = `OWNER` + +const RunAsModeViewer RunAsMode = `VIEWER` + +// String representation for [fmt.Print] +func (f *RunAsMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunAsMode) Set(v string) error { + switch v { + case `OWNER`, `VIEWER`: + *f = RunAsMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OWNER", "VIEWER"`, v) + } +} + +// Type always returns RunAsMode to satisfy [pflag.Value] interface +func (f *RunAsMode) Type() string { + return "RunAsMode" +} + +// Sets the **Run as** role for the object. Must be set to one of `"viewer"` +// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" +// behavior) +type RunAsRole string + +const RunAsRoleOwner RunAsRole = `owner` + +const RunAsRoleViewer RunAsRole = `viewer` + +// String representation for [fmt.Print] +func (f *RunAsRole) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunAsRole) Set(v string) error { + switch v { + case `owner`, `viewer`: + *f = RunAsRole(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "owner", "viewer"`, v) + } +} + +// Type always returns RunAsRole to satisfy [pflag.Value] interface +func (f *RunAsRole) Type() string { + return "RunAsRole" +} + +type ServiceError struct { + ErrorCode ServiceErrorCode `json:"error_code,omitempty"` + // A brief summary of the error condition. 
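+ // A hedged usage sketch (editor's note): callers might branch on the code + // above before surfacing this message, e.g. + // + // if e.ErrorCode == ServiceErrorCodeTemporarilyUnavailable { /* back off and retry */ }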
+ Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ServiceError) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ServiceError) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ServiceErrorCode string + +const ServiceErrorCodeAborted ServiceErrorCode = `ABORTED` + +const ServiceErrorCodeAlreadyExists ServiceErrorCode = `ALREADY_EXISTS` + +const ServiceErrorCodeBadRequest ServiceErrorCode = `BAD_REQUEST` + +const ServiceErrorCodeCancelled ServiceErrorCode = `CANCELLED` + +const ServiceErrorCodeDeadlineExceeded ServiceErrorCode = `DEADLINE_EXCEEDED` + +const ServiceErrorCodeInternalError ServiceErrorCode = `INTERNAL_ERROR` + +const ServiceErrorCodeIoError ServiceErrorCode = `IO_ERROR` + +const ServiceErrorCodeNotFound ServiceErrorCode = `NOT_FOUND` + +const ServiceErrorCodeResourceExhausted ServiceErrorCode = `RESOURCE_EXHAUSTED` + +const ServiceErrorCodeServiceUnderMaintenance ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE` + +const ServiceErrorCodeTemporarilyUnavailable ServiceErrorCode = `TEMPORARILY_UNAVAILABLE` + +const ServiceErrorCodeUnauthenticated ServiceErrorCode = `UNAUTHENTICATED` + +const ServiceErrorCodeUnknown ServiceErrorCode = `UNKNOWN` + +const ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *ServiceErrorCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServiceErrorCode) Set(v string) error { + switch v { + case `ABORTED`, `ALREADY_EXISTS`, `BAD_REQUEST`, `CANCELLED`, `DEADLINE_EXCEEDED`, `INTERNAL_ERROR`, `IO_ERROR`, `NOT_FOUND`, `RESOURCE_EXHAUSTED`, `SERVICE_UNDER_MAINTENANCE`, `TEMPORARILY_UNAVAILABLE`, `UNAUTHENTICATED`, `UNKNOWN`, `WORKSPACE_TEMPORARILY_UNAVAILABLE`: + *f = ServiceErrorCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABORTED", "ALREADY_EXISTS", "BAD_REQUEST", "CANCELLED", "DEADLINE_EXCEEDED", "INTERNAL_ERROR", "IO_ERROR", "NOT_FOUND", "RESOURCE_EXHAUSTED", "SERVICE_UNDER_MAINTENANCE", "TEMPORARILY_UNAVAILABLE", "UNAUTHENTICATED", "UNKNOWN", "WORKSPACE_TEMPORARILY_UNAVAILABLE"`, v) + } +} + +// Type always returns ServiceErrorCode to satisfy [pflag.Value] interface +func (f *ServiceErrorCode) Type() string { + return "ServiceErrorCode" +} + +// Set object ACL +type SetRequest struct { + AccessControlList []AccessControl `json:"access_control_list,omitempty"` + // Object ID. The ACL for the object with this UUID is overwritten by this + // request's POST content. + ObjectId string `json:"-" url:"-"` + // The type of object permission to set. + ObjectType ObjectTypePlural `json:"-" url:"-"` +} + +type SetResponse struct { + AccessControlList []AccessControl `json:"access_control_list,omitempty"` + // An object's type and UUID, separated by a forward slash (/) character. + ObjectId string `json:"object_id,omitempty"` + // A singular noun object type. 
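+ // For example (illustrative pairing), an ACL set on ObjectTypePlural + // `queries` would be echoed back here with the singular type `query`.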
+ ObjectType ObjectType `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SetResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SetResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SetWorkspaceWarehouseConfigRequest struct { + // Optional: Channel selection details + Channel *Channel `json:"channel,omitempty"` + // Deprecated: Use sql_configuration_parameters + ConfigParam *RepeatedEndpointConfPairs `json:"config_param,omitempty"` + // Spark confs for external hive metastore configuration. The JSON-serialized + // size must be <= 512K. + DataAccessConfig []EndpointConfPair `json:"data_access_config,omitempty"` + // List of Warehouse Types allowed in this workspace (limits allowed value + // of the type field in CreateWarehouse and EditWarehouse). Note: Some types + // cannot be disabled; they don't need to be specified in + // SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing + // warehouses to be converted to another type. Used by the frontend to save + // specific type availability in the warehouse create and edit form UI. + EnabledWarehouseTypes []WarehouseTypePair `json:"enabled_warehouse_types,omitempty"` + // Deprecated: Use sql_configuration_parameters + GlobalParam *RepeatedEndpointConfPairs `json:"global_param,omitempty"` + // GCP only: Google Service Account passed to the cluster to access Google + // Cloud Storage + GoogleServiceAccount string `json:"google_service_account,omitempty"` + // AWS Only: Instance profile used to pass IAM role to the cluster + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + // Security policy for warehouses + SecurityPolicy SetWorkspaceWarehouseConfigRequestSecurityPolicy `json:"security_policy,omitempty"` + // SQL configuration parameters + SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SetWorkspaceWarehouseConfigRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SetWorkspaceWarehouseConfigRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Security policy for warehouses +type SetWorkspaceWarehouseConfigRequestSecurityPolicy string + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyDataAccessControl SetWorkspaceWarehouseConfigRequestSecurityPolicy = `DATA_ACCESS_CONTROL` + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyNone SetWorkspaceWarehouseConfigRequestSecurityPolicy = `NONE` + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyPassthrough SetWorkspaceWarehouseConfigRequestSecurityPolicy = `PASSTHROUGH` + +// String representation for [fmt.Print] +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) Set(v string) error { + switch v { + case `DATA_ACCESS_CONTROL`, `NONE`, `PASSTHROUGH`: + *f = SetWorkspaceWarehouseConfigRequestSecurityPolicy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_ACCESS_CONTROL", "NONE", "PASSTHROUGH"`, v) + } +} + +// Type always returns SetWorkspaceWarehouseConfigRequestSecurityPolicy to satisfy [pflag.Value] interface +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) Type() string { + return "SetWorkspaceWarehouseConfigRequestSecurityPolicy" +} + +type
SetWorkspaceWarehouseConfigResponse struct { +} + +// Configures whether the warehouse should use spot instances. +type SpotInstancePolicy string + +const SpotInstancePolicyCostOptimized SpotInstancePolicy = `COST_OPTIMIZED` + +const SpotInstancePolicyPolicyUnspecified SpotInstancePolicy = `POLICY_UNSPECIFIED` + +const SpotInstancePolicyReliabilityOptimized SpotInstancePolicy = `RELIABILITY_OPTIMIZED` + +// String representation for [fmt.Print] +func (f *SpotInstancePolicy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SpotInstancePolicy) Set(v string) error { + switch v { + case `COST_OPTIMIZED`, `POLICY_UNSPECIFIED`, `RELIABILITY_OPTIMIZED`: + *f = SpotInstancePolicy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COST_OPTIMIZED", "POLICY_UNSPECIFIED", "RELIABILITY_OPTIMIZED"`, v) + } +} + +// Type always returns SpotInstancePolicy to satisfy [pflag.Value] interface +func (f *SpotInstancePolicy) Type() string { + return "SpotInstancePolicy" +} + +// Start a warehouse +type StartRequest struct { + // Required. Id of the SQL warehouse. + Id string `json:"-" url:"-"` +} + +type StartWarehouseResponse struct { +} + +// State of the warehouse +type State string + +const StateDeleted State = `DELETED` + +const StateDeleting State = `DELETING` + +const StateRunning State = `RUNNING` + +const StateStarting State = `STARTING` + +const StateStopped State = `STOPPED` + +const StateStopping State = `STOPPING` + +// String representation for [fmt.Print] +func (f *State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *State) Set(v string) error { + switch v { + case `DELETED`, `DELETING`, `RUNNING`, `STARTING`, `STOPPED`, `STOPPING`: + *f = State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETED", "DELETING", "RUNNING", "STARTING", "STOPPED", "STOPPING"`, v) + } +} + +// Type always returns State to satisfy [pflag.Value] interface +func (f *State) Type() string { + return "State" +} + +type StatementParameterListItem struct { + // The name of a parameter marker to be substituted in the statement. + Name string `json:"name"` + // The data type, given as a string. For example: `INT`, `STRING`, + // `DECIMAL(10,2)`. If no type is given, the type is assumed to be `STRING`. + // Complex types, such as `ARRAY`, `MAP`, and `STRUCT` are not supported. + // For valid types, refer to the section [Data types] of the SQL language + // reference. + // + // [Data types]: https://docs.databricks.com/sql/language-manual/functions/cast.html + Type string `json:"type,omitempty"` + // The value to substitute, represented as a string. If omitted, the value + // is interpreted as NULL. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StatementParameterListItem) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StatementParameterListItem) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type StatementResponse struct { + // The result manifest provides schema and metadata for the result set. + Manifest *ResultManifest `json:"manifest,omitempty"` + + Result *ResultData `json:"result,omitempty"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls.
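+ // A usage sketch (editor's illustration, not generated: method and request + // names are assumed from this package's StatementExecution API, error + // handling omitted): + // + // resp, _ := w.StatementExecution.ExecuteStatement(ctx, ExecuteStatementRequest{WarehouseId: warehouseID, Statement: "SELECT 1"}) + // st, _ := w.StatementExecution.GetStatement(ctx, GetStatementRequest{StatementId: resp.StatementId})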
+ StatementId string `json:"statement_id,omitempty"` + // The status response includes execution state and, if relevant, error + // information. + Status *StatementStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *StatementResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s StatementResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: +// running - `SUCCEEDED`: execution was successful, result data available for +// fetch - `FAILED`: execution failed; reason for failure described in +// accompanying error message - `CANCELED`: user canceled; can come from explicit +// cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution +// successful, and statement closed; result no longer available for fetch +type StatementState string + +// user canceled; can come from explicit cancel call, or timeout with +// `on_wait_timeout=CANCEL` +const StatementStateCanceled StatementState = `CANCELED` + +// execution successful, and statement closed; result no longer available for +// fetch +const StatementStateClosed StatementState = `CLOSED` + +// execution failed; reason for failure described in accompanying error message +const StatementStateFailed StatementState = `FAILED` + +// waiting for warehouse +const StatementStatePending StatementState = `PENDING` + +// running +const StatementStateRunning StatementState = `RUNNING` + +// execution was successful, result data available for fetch +const StatementStateSucceeded StatementState = `SUCCEEDED` + +// String representation for [fmt.Print] +func (f *StatementState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *StatementState) Set(v string) error { + switch v { + case `CANCELED`, `CLOSED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCEEDED`: + *f = StatementState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "CLOSED", "FAILED", "PENDING", "RUNNING", "SUCCEEDED"`, v) + } +} + +// Type always returns StatementState to satisfy [pflag.Value] interface +func (f *StatementState) Type() string { + return "StatementState" +} + +// The status response includes execution state and, if relevant, error +// information. +type StatementStatus struct { + Error *ServiceError `json:"error,omitempty"` + // Statement execution state: - `PENDING`: waiting for warehouse - + // `RUNNING`: running - `SUCCEEDED`: execution was successful, result data + // available for fetch - `FAILED`: execution failed; reason for failure + // described in accompanying error message - `CANCELED`: user canceled; can + // come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` + // - `CLOSED`: execution successful, and statement closed; result no longer + // available for fetch + State StatementState `json:"state,omitempty"` +} + +// Health status of the warehouse.
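+// For example (editor's sketch), a caller might treat anything other than +// StatusHealthy as actionable: +// +// if st != StatusHealthy && st != StatusStatusUnspecified { /* surface to an operator */ }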
+type Status string + +const StatusDegraded Status = `DEGRADED` + +const StatusFailed Status = `FAILED` + +const StatusHealthy Status = `HEALTHY` + +const StatusStatusUnspecified Status = `STATUS_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *Status) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Status) Set(v string) error { + switch v { + case `DEGRADED`, `FAILED`, `HEALTHY`, `STATUS_UNSPECIFIED`: + *f = Status(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEGRADED", "FAILED", "HEALTHY", "STATUS_UNSPECIFIED"`, v) + } +} + +// Type always returns Status to satisfy [pflag.Value] interface +func (f *Status) Type() string { + return "Status" +} + +// Stop a warehouse +type StopRequest struct { + // Required. Id of the SQL warehouse. + Id string `json:"-" url:"-"` +} + +type StopWarehouseResponse struct { +} + +type Success struct { + Message SuccessMessage `json:"message,omitempty"` +} + +type SuccessMessage string + +const SuccessMessageSuccess SuccessMessage = `Success` + +// String representation for [fmt.Print] +func (f *SuccessMessage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SuccessMessage) Set(v string) error { + switch v { + case `Success`: + *f = SuccessMessage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Success"`, v) + } +} + +// Type always returns SuccessMessage to satisfy [pflag.Value] interface +func (f *SuccessMessage) Type() string { + return "SuccessMessage" +} + +type TerminationReason struct { + // status code indicating why the cluster was terminated + Code TerminationReasonCode `json:"code,omitempty"` + // list of parameters that provide additional information about why the + // cluster was terminated + Parameters map[string]string `json:"parameters,omitempty"` + // type of the termination + Type TerminationReasonType `json:"type,omitempty"` +} + +// status code indicating why the cluster was terminated +type TerminationReasonCode string + +const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED` + +const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE` + +const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE` + +const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE` + +const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE` + +const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE` + +const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED` + +const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE` + +const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE` + +const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE` + +const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE` + +const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION` + +const 
TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION` + +const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING` + +const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING` + +const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE` + +const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE` + +const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE` + +const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT` + +const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION` + +const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE` + +const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE` + +const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT` + +const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN` + +const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST` + +const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE` + +const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE` + +const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE` + +const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` + +const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE` + +const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE` + +const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED` + +const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE` + +const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED` + +const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY` + +const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE` + +const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE` + +const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR` + +const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT` + +const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE` + +const 
TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE` + +const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED` + +const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE` + +const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT` + +const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT` + +const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` + +const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` + +const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE` + +const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE` + +const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED` + +const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED` + +const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` + +const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` + +const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` + +const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES` + +const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD` + +const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR` + +const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE` + +const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION` + +const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE` + +const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE` + +const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE` + +const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED` + +const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE` + +const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN` + +const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE` + +const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE` + +const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST` + +const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE` + +const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR` + +const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR` + +// String representation for [fmt.Print] +func (f *TerminationReasonCode) String() string { + return string(*f) +} + +// Set raw string value 
and validate it against allowed values +func (f *TerminationReasonCode) Set(v string) error { + switch v { + case `ABUSE_DETECTED`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_SHUTDOWN`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `DATABASE_CONNECTION_FAILURE`, `DBFS_COMPONENT_UNHEALTHY`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `EXECUTION_COMPONENT_UNHEALTHY`, `GCP_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_UNREACHABLE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_SPARK_IMAGE`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `STORAGE_DOWNLOAD_FAILURE`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`: + *f = TerminationReasonCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_SHUTDOWN", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "DATABASE_CONNECTION_FAILURE", "DBFS_COMPONENT_UNHEALTHY", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_UNREACHABLE", 
"DRIVER_UNRESPONSIVE", "EXECUTION_COMPONENT_UNHEALTHY", "GCP_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_DELETED", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_UNREACHABLE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_SPARK_IMAGE", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "STORAGE_DOWNLOAD_FAILURE", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR"`, v) + } +} + +// Type always returns TerminationReasonCode to satisfy [pflag.Value] interface +func (f *TerminationReasonCode) Type() string { + return "TerminationReasonCode" +} + +// type of the termination +type TerminationReasonType string + +const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR` + +const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE` + +const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT` + +const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationReasonType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `SERVICE_FAULT`, `SUCCESS`: + *f = TerminationReasonType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "SERVICE_FAULT", "SUCCESS"`, v) + } +} + +// Type always returns TerminationReasonType to satisfy [pflag.Value] interface +func (f *TerminationReasonType) Type() string { + return "TerminationReasonType" +} + +type TextValue struct { + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TextValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TextValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TimeRange struct { + // The end time in milliseconds. + EndTimeMs int64 `json:"end_time_ms,omitempty" url:"end_time_ms,omitempty"` + // The start time in milliseconds. + StartTimeMs int64 `json:"start_time_ms,omitempty" url:"start_time_ms,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TimeRange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TimeRange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TransferOwnershipObjectId struct { + // Email address for the new owner, who must exist in the workspace. 
+ NewOwner string `json:"new_owner,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *TransferOwnershipObjectId) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TransferOwnershipObjectId) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Transfer object ownership +type TransferOwnershipRequest struct { + // Email address for the new owner, who must exist in the workspace. + NewOwner string `json:"new_owner,omitempty"` + // The ID of the object on which to change ownership. + ObjectId TransferOwnershipObjectId `json:"-" url:"-"` + // The type of object on which to change ownership. + ObjectType OwnableObjectType `json:"-" url:"-"` + + ForceSendFields []string `json:"-"` +} + +func (s *TransferOwnershipRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TransferOwnershipRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Delete an alert +type TrashAlertRequest struct { + Id string `json:"-" url:"-"` +} + +// Delete a query +type TrashQueryRequest struct { + Id string `json:"-" url:"-"` +} + +type UpdateAlertRequest struct { + Alert *UpdateAlertRequestAlert `json:"alert,omitempty"` + + Id string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + UpdateMask string `json:"update_mask"` +} + +type UpdateAlertRequestAlert struct { + // Trigger conditions of the alert. + Condition *AlertCondition `json:"condition,omitempty"` + // Custom body of alert notification, if it exists. See [here] for custom + // templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomBody string `json:"custom_body,omitempty"` + // Custom subject of alert notification, if it exists. This can include + // email subject entries and Slack notification headers, for example. See + // [here] for custom templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomSubject string `json:"custom_subject,omitempty"` + // The display name of the alert. + DisplayName string `json:"display_name,omitempty"` + // Whether to notify alert subscribers when the alert returns to normal. + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + // The owner's username. This field is set to "Unavailable" if the user has + // been deleted. + OwnerUserName string `json:"owner_user_name,omitempty"` + // UUID of the query attached to the alert. + QueryId string `json:"query_id,omitempty"` + // Number of seconds an alert must wait after being triggered to rearm + // itself. After rearming, it can be triggered again. If 0 or not specified, + // the alert will not be triggered again.
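+ // For example, a value of 300 rearms the alert five minutes after it + // fires (illustrative value).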
+ SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateAlertRequestAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateAlertRequestAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateQueryRequest struct { + Id string `json:"-" url:"-"` + + Query *UpdateQueryRequestQuery `json:"query,omitempty"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + UpdateMask string `json:"update_mask"` +} + +type UpdateQueryRequestQuery struct { + // Whether to apply a 1000 row limit to the query result. + ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"` + // Name of the catalog where this query will be executed. + Catalog string `json:"catalog,omitempty"` + // General description that conveys additional information about this query + // such as usage notes. + Description string `json:"description,omitempty"` + // Display name of the query that appears in list views, widget headings, + // and on the query page. + DisplayName string `json:"display_name,omitempty"` + // Username of the user that owns the query. + OwnerUserName string `json:"owner_user_name,omitempty"` + // List of query parameter definitions. + Parameters []QueryParameter `json:"parameters,omitempty"` + // Text of the query to be run. + QueryText string `json:"query_text,omitempty"` + // Sets the "Run as" role for the object. + RunAsMode RunAsMode `json:"run_as_mode,omitempty"` + // Name of the schema where this query will be executed. + Schema string `json:"schema,omitempty"` + + Tags []string `json:"tags,omitempty"` + // ID of the SQL warehouse attached to the query. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateQueryRequestQuery) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateQueryRequestQuery) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateResponse struct { +} + +type UpdateVisualizationRequest struct { + Id string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. 
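+ // For example (illustrative mask), `display_name,serialized_options` + // updates exactly those two fields of the visualization payload below.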
+ UpdateMask string `json:"update_mask"` + + Visualization *UpdateVisualizationRequestVisualization `json:"visualization,omitempty"` +} + +type UpdateVisualizationRequestVisualization struct { + // The display name of the visualization. + DisplayName string `json:"display_name,omitempty"` + // The visualization options vary widely from one visualization type to + // the next and are unsupported. Databricks does not recommend modifying + // visualization options directly. + SerializedOptions string `json:"serialized_options,omitempty"` + // The visualization query plan varies widely from one visualization type to + // the next and is unsupported. Databricks does not recommend modifying the + // visualization query plan directly. + SerializedQueryPlan string `json:"serialized_query_plan,omitempty"` + // The type of visualization: counter, table, funnel, and so on. + Type string `json:"type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateVisualizationRequestVisualization) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type User struct { + Email string `json:"email,omitempty"` + + Id int `json:"id,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *User) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s User) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Visualization struct { + // The timestamp indicating when the visualization was created. + CreateTime string `json:"create_time,omitempty"` + // The display name of the visualization. + DisplayName string `json:"display_name,omitempty"` + // UUID identifying the visualization. + Id string `json:"id,omitempty"` + // UUID of the query that the visualization is attached to. + QueryId string `json:"query_id,omitempty"` + // The visualization options vary widely from one visualization type to + // the next and are unsupported. Databricks does not recommend modifying + // visualization options directly. + SerializedOptions string `json:"serialized_options,omitempty"` + // The visualization query plan varies widely from one visualization type to + // the next and is unsupported. Databricks does not recommend modifying the + // visualization query plan directly. + SerializedQueryPlan string `json:"serialized_query_plan,omitempty"` + // The type of visualization: counter, table, funnel, and so on. + Type string `json:"type,omitempty"` + // The timestamp indicating when the visualization was updated.
+ UpdateTime string `json:"update_time,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Visualization) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Visualization) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WarehouseAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WarehouseAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WarehouseAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WarehouseAccessControlResponse struct { + // All permissions. + AllPermissions []WarehousePermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WarehouseAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WarehouseAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WarehousePermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WarehousePermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WarehousePermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type WarehousePermissionLevel string + +const WarehousePermissionLevelCanManage WarehousePermissionLevel = `CAN_MANAGE` + +const WarehousePermissionLevelCanMonitor WarehousePermissionLevel = `CAN_MONITOR` + +const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE` + +const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *WarehousePermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WarehousePermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_MONITOR`, `CAN_USE`, `IS_OWNER`: + *f = WarehousePermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MONITOR", "CAN_USE", "IS_OWNER"`, v) + } +} + +// Type always returns WarehousePermissionLevel to satisfy [pflag.Value] interface +func (f *WarehousePermissionLevel) Type() string { + return "WarehousePermissionLevel" +} + +type WarehousePermissions struct { + AccessControlList []WarehouseAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WarehousePermissions) 
UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s WarehousePermissions) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +type WarehousePermissionsDescription struct { +	Description string `json:"description,omitempty"` +	// Permission level +	PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *WarehousePermissionsDescription) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s WarehousePermissionsDescription) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +type WarehousePermissionsRequest struct { +	AccessControlList []WarehouseAccessControlRequest `json:"access_control_list,omitempty"` +	// The SQL warehouse for which to get or manage permissions. +	WarehouseId string `json:"-" url:"-"` +} + +type WarehouseTypePair struct { +	// If set to false, the specific warehouse type will not be allowed as a +	// value for warehouse_type in CreateWarehouse and EditWarehouse. +	Enabled bool `json:"enabled,omitempty"` +	// Warehouse type: `PRO` or `CLASSIC`. +	WarehouseType WarehouseTypePairWarehouseType `json:"warehouse_type,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *WarehouseTypePair) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s WarehouseTypePair) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +// Warehouse type: `PRO` or `CLASSIC`. +type WarehouseTypePairWarehouseType string + +const WarehouseTypePairWarehouseTypeClassic WarehouseTypePairWarehouseType = `CLASSIC` + +const WarehouseTypePairWarehouseTypePro WarehouseTypePairWarehouseType = `PRO` + +const WarehouseTypePairWarehouseTypeTypeUnspecified WarehouseTypePairWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *WarehouseTypePairWarehouseType) String() string { +	return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WarehouseTypePairWarehouseType) Set(v string) error { +	switch v { +	case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: +		*f = WarehouseTypePairWarehouseType(v) +		return nil +	default: +		return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) +	} +} + +// Type always returns WarehouseTypePairWarehouseType to satisfy [pflag.Value] interface +func (f *WarehouseTypePairWarehouseType) Type() string { +	return "WarehouseTypePairWarehouseType" +} + +type Widget struct { +	// The unique ID for this widget. +	Id string `json:"id,omitempty"` + +	Options *WidgetOptions `json:"options,omitempty"` +	// The visualization description API changes frequently and is unsupported. +	// You can duplicate a visualization by copying description objects received +	// _from the API_ and then using them to create a new one with a POST +	// request to the same endpoint. Databricks does not recommend constructing +	// ad-hoc visualizations entirely in JSON. +	Visualization *LegacyVisualization `json:"visualization,omitempty"` +	// Unused field.
+ Width int `json:"width,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Widget) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Widget) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WidgetOptions struct { + // Timestamp when this object was created + CreatedAt string `json:"created_at,omitempty"` + // Custom description of the widget + Description string `json:"description,omitempty"` + // Whether this widget is hidden on the dashboard. + IsHidden bool `json:"isHidden,omitempty"` + // How parameters used by the visualization in this widget relate to other + // widgets on the dashboard. Databricks does not recommend modifying this + // definition in JSON. + ParameterMappings any `json:"parameterMappings,omitempty"` + // Coordinates of this widget on a dashboard. This portion of the API + // changes frequently and is unsupported. + Position *WidgetPosition `json:"position,omitempty"` + // Custom title of the widget + Title string `json:"title,omitempty"` + // Timestamp of the last time this object was updated. + UpdatedAt string `json:"updated_at,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WidgetOptions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WidgetOptions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Coordinates of this widget on a dashboard. This portion of the API changes +// frequently and is unsupported. +type WidgetPosition struct { + // reserved for internal use + AutoHeight bool `json:"autoHeight,omitempty"` + // column in the dashboard grid. Values start with 0 + Col int `json:"col,omitempty"` + // row in the dashboard grid. Values start with 0 + Row int `json:"row,omitempty"` + // width of the widget measured in dashboard grid cells + SizeX int `json:"sizeX,omitempty"` + // height of the widget measured in dashboard grid cells + SizeY int `json:"sizeY,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WidgetPosition) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WidgetPosition) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/vectorsearch/v2preview/api.go b/vectorsearch/v2preview/api.go new file mode 100755 index 000000000..94b194203 --- /dev/null +++ b/vectorsearch/v2preview/api.go @@ -0,0 +1,182 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Vector Search Endpoints Preview, Vector Search Indexes Preview, etc. +package vectorsearchpreview + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" +) + +type VectorSearchEndpointsPreviewInterface interface { + + // Create an endpoint. + // + // Create a new endpoint. + CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) + + // Delete an endpoint. + DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error + + // Delete an endpoint. + DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error + + // Get an endpoint. + GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) + + // Get an endpoint. + GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) + + // List all endpoints. + // + // This method is generated by Databricks SDK Code Generator. 
+ ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] + + // List all endpoints. + // + // This method is generated by Databricks SDK Code Generator. + ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) +} + +func NewVectorSearchEndpointsPreview(client *client.DatabricksClient) *VectorSearchEndpointsPreviewAPI { + return &VectorSearchEndpointsPreviewAPI{ + vectorSearchEndpointsPreviewImpl: vectorSearchEndpointsPreviewImpl{ + client: client, + }, + } +} + +// **Endpoint**: Represents the compute resources to host vector search indexes. +type VectorSearchEndpointsPreviewAPI struct { + vectorSearchEndpointsPreviewImpl +} + +// Delete an endpoint. +func (a *VectorSearchEndpointsPreviewAPI) DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error { + return a.vectorSearchEndpointsPreviewImpl.DeleteEndpoint(ctx, DeleteEndpointRequest{ + EndpointName: endpointName, + }) +} + +// Get an endpoint. +func (a *VectorSearchEndpointsPreviewAPI) GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) { + return a.vectorSearchEndpointsPreviewImpl.GetEndpoint(ctx, GetEndpointRequest{ + EndpointName: endpointName, + }) +} + +type VectorSearchIndexesPreviewInterface interface { + + // Create an index. + // + // Create a new index. + CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) + + // Delete data from index. + // + // Handles the deletion of data from a specified vector index. + DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) + + // Delete an index. + // + // Delete an index. + DeleteIndex(ctx context.Context, request DeleteIndexRequest) error + + // Delete an index. + // + // Delete an index. + DeleteIndexByIndexName(ctx context.Context, indexName string) error + + // Get an index. + // + // Get an index. + GetIndex(ctx context.Context, request GetIndexRequest) (*VectorIndex, error) + + // Get an index. + // + // Get an index. + GetIndexByIndexName(ctx context.Context, indexName string) (*VectorIndex, error) + + // List indexes. + // + // List all indexes in the given endpoint. + // + // This method is generated by Databricks SDK Code Generator. + ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] + + // List indexes. + // + // List all indexes in the given endpoint. + // + // This method is generated by Databricks SDK Code Generator. + ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) + + // Query an index. + // + // Query the specified vector index. + QueryIndex(ctx context.Context, request QueryVectorIndexRequest) (*QueryVectorIndexResponse, error) + + // Query next page. + // + // Use `next_page_token` returned from previous `QueryVectorIndex` or + // `QueryVectorIndexNextPage` request to fetch next page of results. + QueryNextPage(ctx context.Context, request QueryVectorIndexNextPageRequest) (*QueryVectorIndexResponse, error) + + // Scan an index. + // + // Scan the specified vector index and return the first `num_results` entries + // after the exclusive `primary_key`. + ScanIndex(ctx context.Context, request ScanVectorIndexRequest) (*ScanVectorIndexResponse, error) + + // Synchronize an index. + // + // Triggers a synchronization process for a specified vector index. 
+ SyncIndex(ctx context.Context, request SyncIndexRequest) error + + // Upsert data into an index. + // + // Handles the upserting of data into a specified vector index. + UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) +} + +func NewVectorSearchIndexesPreview(client *client.DatabricksClient) *VectorSearchIndexesPreviewAPI { + return &VectorSearchIndexesPreviewAPI{ + vectorSearchIndexesPreviewImpl: vectorSearchIndexesPreviewImpl{ + client: client, + }, + } +} + +// **Index**: An efficient representation of your embedding vectors that +// supports real-time and efficient approximate nearest neighbor (ANN) search +// queries. +// +// There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index +// that automatically syncs with a source Delta Table, automatically and +// incrementally updating the index as the underlying data in the Delta Table +// changes. * **Direct Vector Access Index**: An index that supports direct read +// and write of vectors and metadata through our REST and SDK APIs. With this +// model, the user manages index updates. +type VectorSearchIndexesPreviewAPI struct { + vectorSearchIndexesPreviewImpl +} + +// Delete an index. +// +// Delete an index. +func (a *VectorSearchIndexesPreviewAPI) DeleteIndexByIndexName(ctx context.Context, indexName string) error { + return a.vectorSearchIndexesPreviewImpl.DeleteIndex(ctx, DeleteIndexRequest{ + IndexName: indexName, + }) +} + +// Get an index. +// +// Get an index. +func (a *VectorSearchIndexesPreviewAPI) GetIndexByIndexName(ctx context.Context, indexName string) (*VectorIndex, error) { + return a.vectorSearchIndexesPreviewImpl.GetIndex(ctx, GetIndexRequest{ + IndexName: indexName, + }) +} diff --git a/vectorsearch/v2preview/client.go b/vectorsearch/v2preview/client.go new file mode 100755 index 000000000..caaa7769d --- /dev/null +++ b/vectorsearch/v2preview/client.go @@ -0,0 +1,79 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
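For illustration, a minimal, non-generated sketch of consuming the paginated listing interface above. The module import path and the iterator's HasNext/Next contract are assumptions inferred from this patch, not a released API:

package examples

import (
	"context"
	"fmt"

	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

// printEndpoints walks every page lazily; each page is fetched on demand
// via the page_token plumbing implemented in impl.go below.
func printEndpoints(ctx context.Context, api vectorsearchpreview.VectorSearchEndpointsPreviewInterface) error {
	it := api.ListEndpoints(ctx, vectorsearchpreview.ListEndpointsRequest{})
	for it.HasNext(ctx) {
		ep, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(ep.Name)
	}
	// ListEndpointsAll is the eager equivalent: it drains the same iterator
	// into a slice, so every endpoint is held in memory at once.
	return nil
}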
+ +package vectorsearchpreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type VectorSearchEndpointsPreviewClient struct { + VectorSearchEndpointsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewVectorSearchEndpointsPreviewClient(cfg *config.Config) (*VectorSearchEndpointsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &VectorSearchEndpointsPreviewClient{ + Config: cfg, + apiClient: apiClient, + VectorSearchEndpointsPreviewInterface: NewVectorSearchEndpointsPreview(databricksClient), + }, nil +} + +type VectorSearchIndexesPreviewClient struct { + VectorSearchIndexesPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewVectorSearchIndexesPreviewClient(cfg *config.Config) (*VectorSearchIndexesPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &VectorSearchIndexesPreviewClient{ + Config: cfg, + apiClient: apiClient, + VectorSearchIndexesPreviewInterface: NewVectorSearchIndexesPreview(databricksClient), + }, nil +} diff --git a/vectorsearch/v2preview/impl.go b/vectorsearch/v2preview/impl.go new file mode 100755 index 000000000..9dfd88123 --- /dev/null +++ b/vectorsearch/v2preview/impl.go @@ -0,0 +1,231 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
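A hedged end-to-end sketch of the constructors above. The endpoint name is a placeholder, and resolving credentials from the environment through an empty `config.Config` is the SDK's usual behavior rather than something this file spells out:

package main

import (
	"context"
	"log"
	"time"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	// An empty Config is resolved by EnsureResolved, typically from
	// DATABRICKS_HOST / DATABRICKS_TOKEN and related settings.
	c, err := vectorsearchpreview.NewVectorSearchEndpointsPreviewClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	ep, err := c.CreateEndpoint(ctx, vectorsearchpreview.CreateEndpoint{
		Name:         "docs-demo", // placeholder name
		EndpointType: vectorsearchpreview.EndpointTypeStandard,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Endpoint provisioning is asynchronous; poll until the state is ONLINE.
	for ep.EndpointStatus == nil || ep.EndpointStatus.State != vectorsearchpreview.EndpointStatusStateOnline {
		time.Sleep(10 * time.Second)
		if ep, err = c.GetEndpointByEndpointName(ctx, "docs-demo"); err != nil {
			log.Fatal(err)
		}
	}
	log.Printf("endpoint %s is online", ep.Name)
}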
+ +package vectorsearchpreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just VectorSearchEndpointsPreview API methods +type vectorSearchEndpointsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *vectorSearchEndpointsPreviewImpl) CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) { + var endpointInfo EndpointInfo + path := "/api/2.0preview/vector-search/endpoints" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &endpointInfo) + return &endpointInfo, err +} + +func (a *vectorSearchEndpointsPreviewImpl) DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error { + var deleteEndpointResponse DeleteEndpointResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/endpoints/%v", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteEndpointResponse) + return err +} + +func (a *vectorSearchEndpointsPreviewImpl) GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) { + var endpointInfo EndpointInfo + path := fmt.Sprintf("/api/2.0preview/vector-search/endpoints/%v", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &endpointInfo) + return &endpointInfo, err +} + +// List all endpoints. +func (a *vectorSearchEndpointsPreviewImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { + + getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListEndpoints(ctx, req) + } + getItems := func(resp *ListEndpointResponse) []EndpointInfo { + return resp.Endpoints + } + getNextReq := func(resp *ListEndpointResponse) *ListEndpointsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all endpoints. 
+func (a *vectorSearchEndpointsPreviewImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { + iterator := a.ListEndpoints(ctx, request) + return listing.ToSlice[EndpointInfo](ctx, iterator) +} +func (a *vectorSearchEndpointsPreviewImpl) internalListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { + var listEndpointResponse ListEndpointResponse + path := "/api/2.0preview/vector-search/endpoints" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listEndpointResponse) + return &listEndpointResponse, err +} + +// unexported type that holds implementations of just VectorSearchIndexesPreview API methods +type vectorSearchIndexesPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *vectorSearchIndexesPreviewImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { + var createVectorIndexResponse CreateVectorIndexResponse + path := "/api/2.0preview/vector-search/indexes" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createVectorIndexResponse) + return &createVectorIndexResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { + var deleteDataVectorIndexResponse DeleteDataVectorIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/delete-data", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteDataVectorIndexResponse) + return &deleteDataVectorIndexResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) DeleteIndex(ctx context.Context, request DeleteIndexRequest) error { + var deleteIndexResponse DeleteIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteIndexResponse) + return err +} + +func (a *vectorSearchIndexesPreviewImpl) GetIndex(ctx context.Context, request GetIndexRequest) (*VectorIndex, error) { + var vectorIndex VectorIndex + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &vectorIndex) + return &vectorIndex, err +} + +// List indexes. +// +// List all indexes in the given endpoint. 
+func (a *vectorSearchIndexesPreviewImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] { + + getNextPage := func(ctx context.Context, req ListIndexesRequest) (*ListVectorIndexesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListIndexes(ctx, req) + } + getItems := func(resp *ListVectorIndexesResponse) []MiniVectorIndex { + return resp.VectorIndexes + } + getNextReq := func(resp *ListVectorIndexesResponse) *ListIndexesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List indexes. +// +// List all indexes in the given endpoint. +func (a *vectorSearchIndexesPreviewImpl) ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) { + iterator := a.ListIndexes(ctx, request) + return listing.ToSlice[MiniVectorIndex](ctx, iterator) +} +func (a *vectorSearchIndexesPreviewImpl) internalListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { + var listVectorIndexesResponse ListVectorIndexesResponse + path := "/api/2.0preview/vector-search/indexes" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listVectorIndexesResponse) + return &listVectorIndexesResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) QueryIndex(ctx context.Context, request QueryVectorIndexRequest) (*QueryVectorIndexResponse, error) { + var queryVectorIndexResponse QueryVectorIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/query", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryVectorIndexResponse) + return &queryVectorIndexResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) QueryNextPage(ctx context.Context, request QueryVectorIndexNextPageRequest) (*QueryVectorIndexResponse, error) { + var queryVectorIndexResponse QueryVectorIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/query-next-page", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryVectorIndexResponse) + return &queryVectorIndexResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) ScanIndex(ctx context.Context, request ScanVectorIndexRequest) (*ScanVectorIndexResponse, error) { + var scanVectorIndexResponse ScanVectorIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/scan", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &scanVectorIndexResponse) + return &scanVectorIndexResponse, err +} + +func (a *vectorSearchIndexesPreviewImpl) SyncIndex(ctx context.Context, request SyncIndexRequest) error { + var syncIndexResponse 
SyncIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/sync", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &syncIndexResponse) + return err +} + +func (a *vectorSearchIndexesPreviewImpl) UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) { + var upsertDataVectorIndexResponse UpsertDataVectorIndexResponse + path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/upsert-data", request.IndexName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &upsertDataVectorIndexResponse) + return &upsertDataVectorIndexResponse, err +} diff --git a/vectorsearch/v2preview/model.go b/vectorsearch/v2preview/model.go new file mode 100755 index 000000000..28a41f419 --- /dev/null +++ b/vectorsearch/v2preview/model.go @@ -0,0 +1,898 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package vectorsearchpreview + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/marshal" +) + +type ColumnInfo struct { + // Name of the column. + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ColumnInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ColumnInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateEndpoint struct { + // Type of endpoint. + EndpointType EndpointType `json:"endpoint_type"` + // Name of endpoint + Name string `json:"name"` +} + +type CreateVectorIndexRequest struct { + // Specification for Delta Sync Index. Required if `index_type` is + // `DELTA_SYNC`. + DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecRequest `json:"delta_sync_index_spec,omitempty"` + // Specification for Direct Vector Access Index. Required if `index_type` is + // `DIRECT_ACCESS`. + DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` + // Name of the endpoint to be used for serving the index + EndpointName string `json:"endpoint_name"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. + IndexType VectorIndexType `json:"index_type"` + // Name of the index + Name string `json:"name"` + // Primary key of the index + PrimaryKey string `json:"primary_key"` +} + +type CreateVectorIndexResponse struct { + VectorIndex *VectorIndex `json:"vector_index,omitempty"` +} + +// Result of the upsert or delete operation. +type DeleteDataResult struct { + // List of primary keys for rows that failed to process. + FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` + // Count of successfully processed rows. 
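`CreateVectorIndexRequest` above is the single payload used for both index types. A sketch of creating a Direct Vector Access index; the index name and the exact `schema_json` shape (a column-name-to-type object) are assumptions:

package examples

import (
	"context"

	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

func createDirectAccessIndex(ctx context.Context, api vectorsearchpreview.VectorSearchIndexesPreviewInterface) error {
	_, err := api.CreateIndex(ctx, vectorsearchpreview.CreateVectorIndexRequest{
		EndpointName: "docs-demo",
		Name:         "main.default.docs_index", // hypothetical index name
		IndexType:    vectorsearchpreview.VectorIndexTypeDirectAccess,
		PrimaryKey:   "id",
		DirectAccessIndexSpec: &vectorsearchpreview.DirectAccessVectorIndexSpec{
			EmbeddingVectorColumns: []vectorsearchpreview.EmbeddingVectorColumn{
				{Name: "embedding", EmbeddingDimension: 768},
			},
			// Assumed shape: column name -> type, using the supported
			// types documented on SchemaJson below.
			SchemaJson: `{"id": "long", "text": "string", "embedding": "array<float>"}`,
		},
	})
	return err
}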
+ SuccessRowCount int64 `json:"success_row_count,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteDataResult) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteDataResult) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Status of the delete operation. +type DeleteDataStatus string + +const DeleteDataStatusFailure DeleteDataStatus = `FAILURE` + +const DeleteDataStatusPartialSuccess DeleteDataStatus = `PARTIAL_SUCCESS` + +const DeleteDataStatusSuccess DeleteDataStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *DeleteDataStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeleteDataStatus) Set(v string) error { + switch v { + case `FAILURE`, `PARTIAL_SUCCESS`, `SUCCESS`: + *f = DeleteDataStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILURE", "PARTIAL_SUCCESS", "SUCCESS"`, v) + } +} + +// Type always returns DeleteDataStatus to satisfy [pflag.Value] interface +func (f *DeleteDataStatus) Type() string { + return "DeleteDataStatus" +} + +// Request payload for deleting data from a vector index. +type DeleteDataVectorIndexRequest struct { + // Name of the vector index where data is to be deleted. Must be a Direct + // Vector Access Index. + IndexName string `json:"-" url:"-"` + // List of primary keys for the data to be deleted. + PrimaryKeys []string `json:"primary_keys"` +} + +// Response to a delete data vector index request. +type DeleteDataVectorIndexResponse struct { + // Result of the upsert or delete operation. + Result *DeleteDataResult `json:"result,omitempty"` + // Status of the delete operation. + Status DeleteDataStatus `json:"status,omitempty"` +} + +// Delete an endpoint +type DeleteEndpointRequest struct { + // Name of the endpoint + EndpointName string `json:"-" url:"-"` +} + +type DeleteEndpointResponse struct { +} + +// Delete an index +type DeleteIndexRequest struct { + // Name of the index + IndexName string `json:"-" url:"-"` +} + +type DeleteIndexResponse struct { +} + +type DeltaSyncVectorIndexSpecRequest struct { + // [Optional] Select the columns to sync with the vector index. If you leave + // this field blank, all columns from the source table are synced with the + // index. The primary key column and embedding source column or embedding + // vector column are always synced. + ColumnsToSync []string `json:"columns_to_sync,omitempty"` + // The columns that contain the embedding source. + EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` + // The columns that contain the embedding vectors. The format should be + // array[double]. + EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` + // [Optional] Automatically sync the vector index contents and computed + // embeddings to the specified Delta table. The only supported table name is + // the index name with the suffix `_writeback_table`. + EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` + // Pipeline execution mode. + // + // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the + // system stops processing after successfully refreshing the source table in + // the pipeline once, ensuring the table is updated based on the data + // available when the update started. 
- `CONTINUOUS`: If the pipeline uses +	// continuous execution, the pipeline processes new data as it arrives in +	// the source table to keep the vector index fresh. +	PipelineType PipelineType `json:"pipeline_type,omitempty"` +	// The name of the source table. +	SourceTable string `json:"source_table,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *DeltaSyncVectorIndexSpecRequest) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s DeltaSyncVectorIndexSpecRequest) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +type DeltaSyncVectorIndexSpecResponse struct { +	// The columns that contain the embedding source. +	EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` +	// The columns that contain the embedding vectors. +	EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` +	// [Optional] Name of the Delta table to sync the vector index contents and +	// computed embeddings to. +	EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` +	// The ID of the pipeline that is used to sync the index. +	PipelineId string `json:"pipeline_id,omitempty"` +	// Pipeline execution mode. +	// +	// - `TRIGGERED`: If the pipeline uses the triggered execution mode, the +	// system stops processing after successfully refreshing the source table in +	// the pipeline once, ensuring the table is updated based on the data +	// available when the update started. - `CONTINUOUS`: If the pipeline uses +	// continuous execution, the pipeline processes new data as it arrives in +	// the source table to keep the vector index fresh. +	PipelineType PipelineType `json:"pipeline_type,omitempty"` +	// The name of the source table. +	SourceTable string `json:"source_table,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *DeltaSyncVectorIndexSpecResponse) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s DeltaSyncVectorIndexSpecResponse) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +type DirectAccessVectorIndexSpec struct { +	// Contains the optional model endpoint to use during query time. +	EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` + +	EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` +	// The schema of the index in JSON format. +	// +	// Supported types are `integer`, `long`, `float`, `double`, `boolean`, +	// `string`, `date`, `timestamp`. +	// +	// Supported types for vector column: `array<float>`, `array<double>`.
+ SchemaJson string `json:"schema_json,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DirectAccessVectorIndexSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DirectAccessVectorIndexSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EmbeddingSourceColumn struct { + // Name of the embedding model endpoint + EmbeddingModelEndpointName string `json:"embedding_model_endpoint_name,omitempty"` + // Name of the column + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EmbeddingSourceColumn) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EmbeddingSourceColumn) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EmbeddingVectorColumn struct { + // Dimension of the embedding vector + EmbeddingDimension int `json:"embedding_dimension,omitempty"` + // Name of the column + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EmbeddingVectorColumn) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EmbeddingVectorColumn) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EndpointInfo struct { + // Timestamp of endpoint creation + CreationTimestamp int64 `json:"creation_timestamp,omitempty"` + // Creator of the endpoint + Creator string `json:"creator,omitempty"` + // Current status of the endpoint + EndpointStatus *EndpointStatus `json:"endpoint_status,omitempty"` + // Type of endpoint. + EndpointType EndpointType `json:"endpoint_type,omitempty"` + // Unique identifier of the endpoint + Id string `json:"id,omitempty"` + // Timestamp of last update to the endpoint + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // User who last updated the endpoint + LastUpdatedUser string `json:"last_updated_user,omitempty"` + // Name of endpoint + Name string `json:"name,omitempty"` + // Number of indexes on the endpoint + NumIndexes int `json:"num_indexes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Status information of an endpoint +type EndpointStatus struct { + // Additional status message + Message string `json:"message,omitempty"` + // Current state of the endpoint + State EndpointStatusState `json:"state,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *EndpointStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EndpointStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Current state of the endpoint +type EndpointStatusState string + +const EndpointStatusStateOffline EndpointStatusState = `OFFLINE` + +const EndpointStatusStateOnline EndpointStatusState = `ONLINE` + +const EndpointStatusStateProvisioning EndpointStatusState = `PROVISIONING` + +// String representation for [fmt.Print] +func (f *EndpointStatusState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStatusState) Set(v string) error { + switch v { + case `OFFLINE`, `ONLINE`, `PROVISIONING`: + *f = EndpointStatusState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING"`, v) + } +} + +// Type always returns EndpointStatusState to satisfy [pflag.Value] interface +func (f 
*EndpointStatusState) Type() string { + return "EndpointStatusState" +} + +// Type of endpoint. +type EndpointType string + +const EndpointTypeStandard EndpointType = `STANDARD` + +// String representation for [fmt.Print] +func (f *EndpointType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointType) Set(v string) error { + switch v { + case `STANDARD`: + *f = EndpointType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "STANDARD"`, v) + } +} + +// Type always returns EndpointType to satisfy [pflag.Value] interface +func (f *EndpointType) Type() string { + return "EndpointType" +} + +// Get an endpoint +type GetEndpointRequest struct { + // Name of the endpoint + EndpointName string `json:"-" url:"-"` +} + +// Get an index +type GetIndexRequest struct { + // Name of the index + IndexName string `json:"-" url:"-"` +} + +type ListEndpointResponse struct { + // An array of Endpoint objects + Endpoints []EndpointInfo `json:"endpoints,omitempty"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListEndpointResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListEndpointResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List all endpoints +type ListEndpointsRequest struct { + // Token for pagination + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListEndpointsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListEndpointsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// List indexes +type ListIndexesRequest struct { + // Name of the endpoint + EndpointName string `json:"-" url:"endpoint_name"` + // Token for pagination + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListIndexesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListIndexesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListValue struct { + Values []Value `json:"values,omitempty"` +} + +type ListVectorIndexesResponse struct { + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + + VectorIndexes []MiniVectorIndex `json:"vector_indexes,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListVectorIndexesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListVectorIndexesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Key-value pair. +type MapStringValueEntry struct { + // Column name. + Key string `json:"key,omitempty"` + // Column value, nullable. + Value *Value `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MapStringValueEntry) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MapStringValueEntry) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type MiniVectorIndex struct { + // The user who created the index. 
+ Creator string `json:"creator,omitempty"` + // Name of the endpoint associated with the index + EndpointName string `json:"endpoint_name,omitempty"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. + IndexType VectorIndexType `json:"index_type,omitempty"` + // Name of the index + Name string `json:"name,omitempty"` + // Primary key of the index + PrimaryKey string `json:"primary_key,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *MiniVectorIndex) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MiniVectorIndex) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Pipeline execution mode. +// +// - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system +// stops processing after successfully refreshing the source table in the +// pipeline once, ensuring the table is updated based on the data available when +// the update started. - `CONTINUOUS`: If the pipeline uses continuous +// execution, the pipeline processes new data as it arrives in the source table +// to keep vector index fresh. +type PipelineType string + +// If the pipeline uses continuous execution, the pipeline processes new data as +// it arrives in the source table to keep vector index fresh. +const PipelineTypeContinuous PipelineType = `CONTINUOUS` + +// If the pipeline uses the triggered execution mode, the system stops +// processing after successfully refreshing the source table in the pipeline +// once, ensuring the table is updated based on the data available when the +// update started. +const PipelineTypeTriggered PipelineType = `TRIGGERED` + +// String representation for [fmt.Print] +func (f *PipelineType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineType) Set(v string) error { + switch v { + case `CONTINUOUS`, `TRIGGERED`: + *f = PipelineType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONTINUOUS", "TRIGGERED"`, v) + } +} + +// Type always returns PipelineType to satisfy [pflag.Value] interface +func (f *PipelineType) Type() string { + return "PipelineType" +} + +// Request payload for getting next page of results. +type QueryVectorIndexNextPageRequest struct { + // Name of the endpoint. + EndpointName string `json:"endpoint_name,omitempty"` + // Name of the vector index to query. + IndexName string `json:"-" url:"-"` + // Page token returned from previous `QueryVectorIndex` or + // `QueryVectorIndexNextPage` API. + PageToken string `json:"page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryVectorIndexNextPageRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryVectorIndexNextPageRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type QueryVectorIndexRequest struct { + // List of column names to include in the response. + Columns []string `json:"columns"` + // JSON string representing query filters. + // + // Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": + // 5}`: Filter for id greater than 5. 
- `{"id <=": 5}`: Filter for id less +	// than or equal to 5. - `{"id >=": 5}`: Filter for id greater than or equal to 5. +	// - `{"id": 5}`: Filter for id equal to 5. +	FiltersJson string `json:"filters_json,omitempty"` +	// Name of the vector index to query. +	IndexName string `json:"-" url:"-"` +	// Number of results to return. Defaults to 10. +	NumResults int `json:"num_results,omitempty"` +	// Query text. Required for Delta Sync Index using model endpoint. +	QueryText string `json:"query_text,omitempty"` +	// The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. +	QueryType string `json:"query_type,omitempty"` +	// Query vector. Required for Direct Vector Access Index and Delta Sync +	// Index using self-managed vectors. +	QueryVector []float64 `json:"query_vector,omitempty"` +	// Threshold for the approximate nearest neighbor search. Defaults to 0.0. +	ScoreThreshold float64 `json:"score_threshold,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *QueryVectorIndexRequest) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s QueryVectorIndexRequest) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +type QueryVectorIndexResponse struct { +	// Metadata about the result set. +	Manifest *ResultManifest `json:"manifest,omitempty"` +	// [Optional] Token that can be used in `QueryVectorIndexNextPage` API to +	// get next page of results. If more than 1000 results satisfy the query, +	// they are returned in groups of 1000. Empty value means no more results. +	NextPageToken string `json:"next_page_token,omitempty"` +	// Data returned in the query result. +	Result *ResultData `json:"result,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *QueryVectorIndexResponse) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s QueryVectorIndexResponse) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +// Data returned in the query result. +type ResultData struct { +	// Data rows returned in the query. +	DataArray [][]string `json:"data_array,omitempty"` +	// Number of rows in the result set. +	RowCount int `json:"row_count,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *ResultData) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s ResultData) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +// Metadata about the result set. +type ResultManifest struct { +	// Number of columns in the result set. +	ColumnCount int `json:"column_count,omitempty"` +	// Information about each column in the result set. +	Columns []ColumnInfo `json:"columns,omitempty"` + +	ForceSendFields []string `json:"-"` +} + +func (s *ResultManifest) UnmarshalJSON(b []byte) error { +	return marshal.Unmarshal(b, s) +} + +func (s ResultManifest) MarshalJSON() ([]byte, error) { +	return marshal.Marshal(s) +} + +// Request payload for scanning data from a vector index. +type ScanVectorIndexRequest struct { +	// Name of the vector index to scan. +	IndexName string `json:"-" url:"-"` +	// Primary key of the last entry returned in the previous scan. +	LastPrimaryKey string `json:"last_primary_key,omitempty"` +	// Number of results to return. Defaults to 10.
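A query sketch tying together the `QueryVectorIndexRequest` fields documented above; the index name is hypothetical and the filter string follows the `filters_json` examples:

package examples

import (
	"context"
	"fmt"

	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

func querySimilar(ctx context.Context, api vectorsearchpreview.VectorSearchIndexesPreviewInterface, vec []float64) error {
	resp, err := api.QueryIndex(ctx, vectorsearchpreview.QueryVectorIndexRequest{
		IndexName:   "main.default.docs_index", // hypothetical
		Columns:     []string{"id", "text"},
		QueryVector: vec,           // required for self-managed vectors
		FiltersJson: `{"id >": 5}`, // syntax per the filters_json docs above
		NumResults:  10,
	})
	if err != nil {
		return err
	}
	if resp.Result != nil {
		for _, row := range resp.Result.DataArray {
			fmt.Println(row) // each row is a []string; see Manifest for column info
		}
	}
	return nil
}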
+ NumResults int `json:"num_results,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ScanVectorIndexRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ScanVectorIndexRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Response to a scan vector index request. +type ScanVectorIndexResponse struct { + // List of data entries + Data []Struct `json:"data,omitempty"` + // Primary key of the last entry. + LastPrimaryKey string `json:"last_primary_key,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ScanVectorIndexResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ScanVectorIndexResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Struct struct { + // Data entry, corresponding to a row in a vector index. + Fields []MapStringValueEntry `json:"fields,omitempty"` +} + +// Synchronize an index +type SyncIndexRequest struct { + // Name of the vector index to synchronize. Must be a Delta Sync Index. + IndexName string `json:"-" url:"-"` +} + +type SyncIndexResponse struct { +} + +// Result of the upsert or delete operation. +type UpsertDataResult struct { + // List of primary keys for rows that failed to process. + FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` + // Count of successfully processed rows. + SuccessRowCount int64 `json:"success_row_count,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpsertDataResult) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpsertDataResult) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Status of the upsert operation. +type UpsertDataStatus string + +const UpsertDataStatusFailure UpsertDataStatus = `FAILURE` + +const UpsertDataStatusPartialSuccess UpsertDataStatus = `PARTIAL_SUCCESS` + +const UpsertDataStatusSuccess UpsertDataStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *UpsertDataStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpsertDataStatus) Set(v string) error { + switch v { + case `FAILURE`, `PARTIAL_SUCCESS`, `SUCCESS`: + *f = UpsertDataStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILURE", "PARTIAL_SUCCESS", "SUCCESS"`, v) + } +} + +// Type always returns UpsertDataStatus to satisfy [pflag.Value] interface +func (f *UpsertDataStatus) Type() string { + return "UpsertDataStatus" +} + +// Request payload for upserting data into a vector index. +type UpsertDataVectorIndexRequest struct { + // Name of the vector index where data is to be upserted. Must be a Direct + // Vector Access Index. + IndexName string `json:"-" url:"-"` + // JSON string representing the data to be upserted. + InputsJson string `json:"inputs_json"` +} + +// Response to an upsert data vector index request. +type UpsertDataVectorIndexResponse struct { + // Result of the upsert or delete operation. + Result *UpsertDataResult `json:"result,omitempty"` + // Status of the upsert operation. 
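A scan sketch built on `ScanVectorIndexRequest`/`ScanVectorIndexResponse` above, paging by `last_primary_key`; treating an empty page as the end of the scan is an assumption, not documented behavior:

package examples

import (
	"context"
	"fmt"

	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

func scanIndex(ctx context.Context, api vectorsearchpreview.VectorSearchIndexesPreviewInterface) error {
	last := "" // empty key starts from the beginning (assumed)
	for {
		resp, err := api.ScanIndex(ctx, vectorsearchpreview.ScanVectorIndexRequest{
			IndexName:      "main.default.docs_index", // hypothetical
			LastPrimaryKey: last,
			NumResults:     100,
		})
		if err != nil {
			return err
		}
		if len(resp.Data) == 0 {
			return nil // assumed: an empty page ends the scan
		}
		for _, row := range resp.Data {
			for _, f := range row.Fields {
				if f.Value != nil {
					fmt.Printf("%s=%+v ", f.Key, *f.Value)
				}
			}
			fmt.Println()
		}
		last = resp.LastPrimaryKey
	}
}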
+ Status UpsertDataStatus `json:"status,omitempty"` +} + +type Value struct { + BoolValue bool `json:"bool_value,omitempty"` + + ListValue *ListValue `json:"list_value,omitempty"` + + NullValue string `json:"null_value,omitempty"` + + NumberValue float64 `json:"number_value,omitempty"` + + StringValue string `json:"string_value,omitempty"` + + StructValue *Struct `json:"struct_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Value) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Value) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type VectorIndex struct { + // The user who created the index. + Creator string `json:"creator,omitempty"` + + DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecResponse `json:"delta_sync_index_spec,omitempty"` + + DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` + // Name of the endpoint associated with the index + EndpointName string `json:"endpoint_name,omitempty"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. + IndexType VectorIndexType `json:"index_type,omitempty"` + // Name of the index + Name string `json:"name,omitempty"` + // Primary key of the index + PrimaryKey string `json:"primary_key,omitempty"` + + Status *VectorIndexStatus `json:"status,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *VectorIndex) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s VectorIndex) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type VectorIndexStatus struct { + // Index API Url to be used to perform operations on the index + IndexUrl string `json:"index_url,omitempty"` + // Number of rows indexed + IndexedRowCount int64 `json:"indexed_row_count,omitempty"` + // Message associated with the index status + Message string `json:"message,omitempty"` + // Whether the index is ready for search + Ready bool `json:"ready,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *VectorIndexStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s VectorIndexStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// There are 2 types of Vector Search indexes: +// +// - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, +// automatically and incrementally updating the index as the underlying data in +// the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct +// read and write of vectors and metadata through our REST and SDK APIs. With +// this model, the user manages index updates. +type VectorIndexType string + +// An index that automatically syncs with a source Delta Table, automatically +// and incrementally updating the index as the underlying data in the Delta +// Table changes. +const VectorIndexTypeDeltaSync VectorIndexType = `DELTA_SYNC` + +// An index that supports direct read and write of vectors and metadata through +// our REST and SDK APIs. With this model, the user manages index updates. 
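An upsert sketch using the request/response types above; the row layout inside `inputs_json` (a JSON array of objects keyed by column name) is an assumption based on the field's one-line description:

package examples

import (
	"context"
	"fmt"

	vectorsearchpreview "github.com/databricks/databricks-sdk-go/vectorsearch/v2preview" // assumed import path
)

func upsertRows(ctx context.Context, api vectorsearchpreview.VectorSearchIndexesPreviewInterface) error {
	resp, err := api.UpsertDataVectorIndex(ctx, vectorsearchpreview.UpsertDataVectorIndexRequest{
		IndexName: "main.default.docs_index", // hypothetical; must be a Direct Vector Access index
		// Assumed shape: a JSON array of rows keyed by column name.
		InputsJson: `[{"id": 1, "text": "hello", "embedding": [0.1, 0.2, 0.3]}]`,
	})
	if err != nil {
		return err
	}
	// PARTIAL_SUCCESS surfaces per-row failures in result.failed_primary_keys.
	if resp.Status != vectorsearchpreview.UpsertDataStatusSuccess && resp.Result != nil {
		return fmt.Errorf("upsert %s, failed keys: %v", resp.Status, resp.Result.FailedPrimaryKeys)
	}
	return nil
}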
+const VectorIndexTypeDirectAccess VectorIndexType = `DIRECT_ACCESS` + +// String representation for [fmt.Print] +func (f *VectorIndexType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *VectorIndexType) Set(v string) error { + switch v { + case `DELTA_SYNC`, `DIRECT_ACCESS`: + *f = VectorIndexType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTA_SYNC", "DIRECT_ACCESS"`, v) + } +} + +// Type always returns VectorIndexType to satisfy [pflag.Value] interface +func (f *VectorIndexType) Type() string { + return "VectorIndexType" +} diff --git a/workspace/v2preview/api.go b/workspace/v2preview/api.go new file mode 100755 index 000000000..77e0d0a35 --- /dev/null +++ b/workspace/v2preview/api.go @@ -0,0 +1,931 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +// These APIs allow you to manage Git Credentials Preview, Repos Preview, Secrets Preview, Workspace Preview, etc. +package workspacepreview + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +type GitCredentialsPreviewInterface interface { + + // Create a credential entry. + // + // Creates a Git credential entry for the user. Only one Git credential per user + // is supported, so any attempts to create credentials if an entry already + // exists will fail. Use the PATCH endpoint to update existing credentials, or + // the DELETE endpoint to delete existing credentials. + Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error) + + // Delete a credential. + // + // Deletes the specified Git credential. + Delete(ctx context.Context, request DeleteCredentialsRequest) error + + // Delete a credential. + // + // Deletes the specified Git credential. + DeleteByCredentialId(ctx context.Context, credentialId int64) error + + // Get a credential entry. + // + // Gets the Git credential with the specified credential ID. + Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error) + + // Get a credential entry. + // + // Gets the Git credential with the specified credential ID. + GetByCredentialId(ctx context.Context, credentialId int64) (*GetCredentialsResponse, error) + + // Get Git credentials. + // + // Lists the calling user's Git credentials. One credential per user is + // supported. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context) listing.Iterator[CredentialInfo] + + // Get Git credentials. + // + // Lists the calling user's Git credentials. One credential per user is + // supported. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context) ([]CredentialInfo, error) + + // CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsPreviewAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. + // + // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. + // + // Note: All [CredentialInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
+ CredentialInfoGitProviderToCredentialIdMap(ctx context.Context) (map[string]int64, error) + + // GetByGitProvider calls [GitCredentialsPreviewAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. + // + // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. + // + // Note: All [CredentialInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByGitProvider(ctx context.Context, name string) (*CredentialInfo, error) + + // Update a credential. + // + // Updates the specified Git credential. + Update(ctx context.Context, request UpdateCredentialsRequest) error +} + +func NewGitCredentialsPreview(client *client.DatabricksClient) *GitCredentialsPreviewAPI { + return &GitCredentialsPreviewAPI{ + gitCredentialsPreviewImpl: gitCredentialsPreviewImpl{ + client: client, + }, + } +} + +// Registers personal access token for Databricks to do operations on behalf of +// the user. +// +// See [more info]. +// +// [more info]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html +type GitCredentialsPreviewAPI struct { + gitCredentialsPreviewImpl +} + +// Delete a credential. +// +// Deletes the specified Git credential. +func (a *GitCredentialsPreviewAPI) DeleteByCredentialId(ctx context.Context, credentialId int64) error { + return a.gitCredentialsPreviewImpl.Delete(ctx, DeleteCredentialsRequest{ + CredentialId: credentialId, + }) +} + +// Get a credential entry. +// +// Gets the Git credential with the specified credential ID. +func (a *GitCredentialsPreviewAPI) GetByCredentialId(ctx context.Context, credentialId int64) (*GetCredentialsResponse, error) { + return a.gitCredentialsPreviewImpl.Get(ctx, GetCredentialsRequest{ + CredentialId: credentialId, + }) +} + +// CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsPreviewAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. +// +// Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. +// +// Note: All [CredentialInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *GitCredentialsPreviewAPI) CredentialInfoGitProviderToCredentialIdMap(ctx context.Context) (map[string]int64, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]int64{} + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.GitProvider + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .GitProvider: %s", key) + } + mapping[key] = v.CredentialId + } + return mapping, nil +} + +// GetByGitProvider calls [GitCredentialsPreviewAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. +// +// Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. +// +// Note: All [CredentialInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. 
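A lookup sketch for the name-to-id helper documented above; per the documented caveat, every credential is loaded into memory before matching (the import path is assumed):

package examples

import (
	"context"

	workspacepreview "github.com/databricks/databricks-sdk-go/workspace/v2preview" // assumed import path
)

func credentialIDFor(ctx context.Context, api workspacepreview.GitCredentialsPreviewInterface, provider string) (int64, error) {
	// Errors if zero entries, or more than one entry, match the provider.
	info, err := api.GetByGitProvider(ctx, provider)
	if err != nil {
		return 0, err
	}
	return info.CredentialId, nil
}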
+func (a *GitCredentialsPreviewAPI) GetByGitProvider(ctx context.Context, name string) (*CredentialInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx) + if err != nil { + return nil, err + } + tmp := map[string][]CredentialInfo{} + for _, v := range result { + key := v.GitProvider + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("CredentialInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of CredentialInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type ReposPreviewInterface interface { + + // Create a repo. + // + // Creates a repo in the workspace and links it to the remote Git repo + // specified. Note that repos created programmatically must be linked to a + // remote Git repo, unlike repos created in the browser. + Create(ctx context.Context, request CreateRepoRequest) (*CreateRepoResponse, error) + + // Delete a repo. + // + // Deletes the specified repo. + Delete(ctx context.Context, request DeleteRepoRequest) error + + // Delete a repo. + // + // Deletes the specified repo. + DeleteByRepoId(ctx context.Context, repoId int64) error + + // Get a repo. + // + // Returns the repo with the given repo ID. + Get(ctx context.Context, request GetRepoRequest) (*GetRepoResponse, error) + + // Get a repo. + // + // Returns the repo with the given repo ID. + GetByRepoId(ctx context.Context, repoId int64) (*GetRepoResponse, error) + + // Get repo permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetRepoPermissionLevelsRequest) (*GetRepoPermissionLevelsResponse, error) + + // Get repo permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByRepoId(ctx context.Context, repoId string) (*GetRepoPermissionLevelsResponse, error) + + // Get repo permissions. + // + // Gets the permissions of a repo. Repos can inherit permissions from their root + // object. + GetPermissions(ctx context.Context, request GetRepoPermissionsRequest) (*RepoPermissions, error) + + // Get repo permissions. + // + // Gets the permissions of a repo. Repos can inherit permissions from their root + // object. + GetPermissionsByRepoId(ctx context.Context, repoId string) (*RepoPermissions, error) + + // Get repos. + // + // Returns repos that the calling user has Manage permissions on. Use + // `next_page_token` to iterate through additional pages. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] + + // Get repos. + // + // Returns repos that the calling user has Manage permissions on. Use + // `next_page_token` to iterate through additional pages. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) + + // RepoInfoPathToIdMap calls [ReposPreviewAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. + // + // Returns an error if there's more than one [RepoInfo] with the same .Path. + // + // Note: All [RepoInfo] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. 
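A minimal sketch of calling the by-provider helpers above from a consumer program. The import paths follow this patch's layout; host and token are resolved through the usual config chain, and the "gitHub" provider value is illustrative:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/databricks/databricks-sdk-go/databricks/config"
    	workspacepreview "github.com/databricks/databricks-sdk-go/workspace/v2preview"
    )

    func main() {
    	ctx := context.Background()
    	// An empty Config resolves host/token from the environment or ~/.databrickscfg.
    	creds, err := workspacepreview.NewGitCredentialsPreviewClient(&config.Config{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Lists all credentials, then filters by .GitProvider client-side; errors
    	// if zero or more than one credential matches.
    	cred, err := creds.GetByGitProvider(ctx, "gitHub")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(cred.CredentialId)
    }

Since only one Git credential per user is supported, the duplicate-key error path in the map helper should be unreachable in practice for this service.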
+ RepoInfoPathToIdMap(ctx context.Context, request ListReposRequest) (map[string]int64, error) + + // GetByPath calls [ReposPreviewAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. + // + // Returns an error if there's more than one [RepoInfo] with the same .Path. + // + // Note: All [RepoInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByPath(ctx context.Context, name string) (*RepoInfo, error) + + // Set repo permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their root object. + SetPermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) + + // Update a repo. + // + // Updates the repo to a different branch or tag, or updates the repo to the + // latest commit on the same branch. + Update(ctx context.Context, request UpdateRepoRequest) error + + // Update repo permissions. + // + // Updates the permissions on a repo. Repos can inherit permissions from their + // root object. + UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) +} + +func NewReposPreview(client *client.DatabricksClient) *ReposPreviewAPI { + return &ReposPreviewAPI{ + reposPreviewImpl: reposPreviewImpl{ + client: client, + }, + } +} + +// The Repos API allows users to manage their Git repos. Users can use the API +// to access all repos that they have manage permissions on. +// +// Databricks Repos is a visual Git client in Databricks. It supports common Git +// operations such as cloning a repository, committing and pushing, pulling, +// branch management, and visual comparison of diffs when committing. +// +// Within Repos you can develop code in notebooks or other files and follow data +// science and engineering code development best practices using Git for version +// control, collaboration, and CI/CD. +type ReposPreviewAPI struct { + reposPreviewImpl +} + +// Delete a repo. +// +// Deletes the specified repo. +func (a *ReposPreviewAPI) DeleteByRepoId(ctx context.Context, repoId int64) error { + return a.reposPreviewImpl.Delete(ctx, DeleteRepoRequest{ + RepoId: repoId, + }) +} + +// Get a repo. +// +// Returns the repo with the given repo ID. +func (a *ReposPreviewAPI) GetByRepoId(ctx context.Context, repoId int64) (*GetRepoResponse, error) { + return a.reposPreviewImpl.Get(ctx, GetRepoRequest{ + RepoId: repoId, + }) +} + +// Get repo permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *ReposPreviewAPI) GetPermissionLevelsByRepoId(ctx context.Context, repoId string) (*GetRepoPermissionLevelsResponse, error) { + return a.reposPreviewImpl.GetPermissionLevels(ctx, GetRepoPermissionLevelsRequest{ + RepoId: repoId, + }) +} + +// Get repo permissions. +// +// Gets the permissions of a repo. Repos can inherit permissions from their root +// object. +func (a *ReposPreviewAPI) GetPermissionsByRepoId(ctx context.Context, repoId string) (*RepoPermissions, error) { + return a.reposPreviewImpl.GetPermissions(ctx, GetRepoPermissionsRequest{ + RepoId: repoId, + }) +} + +// RepoInfoPathToIdMap calls [ReposPreviewAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. +// +// Returns an error if there's more than one [RepoInfo] with the same .Path.
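A sketch of driving ReposPreviewAPI end to end, continuing the previous example's imports and ctx. The remote URL, provider, and workspace path are placeholders, and the CreateRepoRequest/UpdateRepoRequest field names (Url, Provider, and the Id on the create response) are assumed to mirror the non-preview v2 workspace models:

    repos, err := workspacepreview.NewReposPreviewClient(&config.Config{})
    if err != nil {
    	log.Fatal(err)
    }
    // Programmatically created repos must be linked to a remote Git repo.
    repo, err := repos.Create(ctx, workspacepreview.CreateRepoRequest{
    	Url:      "https://github.com/example/project.git", // placeholder remote
    	Provider: "gitHub",
    	Path:     "/Repos/someone@example.com/project", // placeholder path
    })
    if err != nil {
    	log.Fatal(err)
    }
    // Check out a branch; a Tag field would instead pin a tag (detached HEAD).
    err = repos.Update(ctx, workspacepreview.UpdateRepoRequest{
    	RepoId: repo.Id, // Id assumed on CreateRepoResponse, as in the v2 models
    	Branch: "main",
    })
    if err != nil {
    	log.Fatal(err)
    }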
+// +// Note: All [RepoInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ReposPreviewAPI) RepoInfoPathToIdMap(ctx context.Context, request ListReposRequest) (map[string]int64, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]int64{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Path + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Path: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByPath calls [ReposPreviewAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. +// +// Returns an error if there's more than one [RepoInfo] with the same .Path. +// +// Note: All [RepoInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *ReposPreviewAPI) GetByPath(ctx context.Context, name string) (*RepoInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListReposRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]RepoInfo{} + for _, v := range result { + key := v.Path + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("RepoInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of RepoInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +type SecretsPreviewInterface interface { + + // Create a new secret scope. + // + // The scope name must consist of alphanumeric characters, dashes, underscores, + // and periods, and may not exceed 128 characters. + CreateScope(ctx context.Context, request CreateScope) error + + // Delete an ACL. + // + // Deletes the given ACL on the given scope. + // + // Users must have the `MANAGE` permission to invoke this API. Throws + // `RESOURCE_DOES_NOT_EXIST` if no such secret scope, principal, or ACL exists. + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + DeleteAcl(ctx context.Context, request DeleteAcl) error + + // Delete a secret scope. + // + // Deletes a secret scope. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + DeleteScope(ctx context.Context, request DeleteScope) error + + // Delete a secret scope. + // + // Deletes a secret scope. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + DeleteScopeByScope(ctx context.Context, scope string) error + + // Delete a secret. + // + // Deletes the secret stored in this secret scope. You must have `WRITE` or + // `MANAGE` permission on the secret scope. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope or secret exists. + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + DeleteSecret(ctx context.Context, request DeleteSecret) error + + // Get secret ACL details. + // + // Gets the details about the given ACL, such as the group and permission. Users + // must have the `MANAGE` permission to invoke this API. 
+ // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + GetAcl(ctx context.Context, request GetAclRequest) (*AclItem, error) + + // Get a secret. + // + // Gets the bytes representation of a secret value for the specified scope and + // key. + // + // Users need the READ permission to make this call. + // + // Note that the secret value returned is in bytes. The interpretation of the + // bytes is determined by the caller in DBUtils and the type the data is decoded + // into. + // + // Throws ``PERMISSION_DENIED`` if the user does not have permission to make + // this API call. Throws ``RESOURCE_DOES_NOT_EXIST`` if no such secret or secret + // scope exists. + GetSecret(ctx context.Context, request GetSecretRequest) (*GetSecretResponse, error) + + // Lists ACLs. + // + // List the ACLs for a given secret scope. Users must have the `MANAGE` + // permission to invoke this API. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + // + // This method is generated by Databricks SDK Code Generator. + ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] + + // Lists ACLs. + // + // List the ACLs for a given secret scope. Users must have the `MANAGE` + // permission to invoke this API. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + // + // This method is generated by Databricks SDK Code Generator. + ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) + + // Lists ACLs. + // + // List the ACLs for a given secret scope. Users must have the `MANAGE` + // permission to invoke this API. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + ListAclsByScope(ctx context.Context, scope string) (*ListAclsResponse, error) + + // List all scopes. + // + // Lists all secret scopes available in the workspace. + // + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + // + // This method is generated by Databricks SDK Code Generator. + ListScopes(ctx context.Context) listing.Iterator[SecretScope] + + // List all scopes. + // + // Lists all secret scopes available in the workspace. + // + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + // + // This method is generated by Databricks SDK Code Generator. + ListScopesAll(ctx context.Context) ([]SecretScope, error) + + // List secret keys. + // + // Lists the secret keys that are stored at this scope. This is a metadata-only + // operation; secret data cannot be retrieved using this API. Users need the + // READ permission to make this call. + // + // The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws + // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + // + // This method is generated by Databricks SDK Code Generator. + ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] + + // List secret keys. + // + // Lists the secret keys that are stored at this scope. 
This is a metadata-only + // operation; secret data cannot be retrieved using this API. Users need the + // READ permission to make this call. + // + // The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws + // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + // + // This method is generated by Databricks SDK Code Generator. + ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) + + // List secret keys. + // + // Lists the secret keys that are stored at this scope. This is a metadata-only + // operation; secret data cannot be retrieved using this API. Users need the + // READ permission to make this call. + // + // The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws + // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `PERMISSION_DENIED` if the user does not have permission to make this API + // call. + ListSecretsByScope(ctx context.Context, scope string) (*ListSecretsResponse, error) + + // Create/update an ACL. + // + // Creates or overwrites the Access Control List (ACL) associated with the given + // principal (user or group) on the specified scope point. + // + // In general, a user or group will use the most powerful permission available + // to them, and permissions are ordered as follows: + // + // * `MANAGE` - Allowed to change ACLs, and read and write to this secret scope. + // * `WRITE` - Allowed to read and write to this secret scope. * `READ` - + // Allowed to read this secret scope and list what secrets are available. + // + // Note that in general, secret values can only be read from within a command on + // a cluster (for example, through a notebook). There is no API to read the + // actual secret value material outside of a cluster. However, the user's + // permission will be applied based on who is executing the command, and they + // must have at least READ permission. + // + // Users must have the `MANAGE` permission to invoke this API. + // + // The principal is a user or group name corresponding to an existing Databricks + // principal to be granted or revoked access. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws + // `RESOURCE_ALREADY_EXISTS` if a permission for the principal already exists. + // Throws `INVALID_PARAMETER_VALUE` if the permission or principal is invalid. + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + PutAcl(ctx context.Context, request PutAcl) error + + // Add a secret. + // + // Inserts a secret under the provided scope with the given name. If a secret + // already exists with the same name, this command overwrites the existing + // secret's value. The server encrypts the secret using the secret scope's + // encryption settings before storing it. + // + // You must have `WRITE` or `MANAGE` permission on the secret scope. The secret + // key must consist of alphanumeric characters, dashes, underscores, and + // periods, and cannot exceed 128 characters. The maximum allowed secret value + // size is 128 KB. The maximum number of secrets in a given scope is 1000. + // + // The input fields "string_value" or "bytes_value" specify the type of the + // secret, which will determine the value returned when the secret value is + // requested. Exactly one must be specified. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. 
Throws + // `RESOURCE_LIMIT_EXCEEDED` if maximum number of secrets in scope is exceeded. + // Throws `INVALID_PARAMETER_VALUE` if the key name or value length is invalid. + // Throws `PERMISSION_DENIED` if the user does not have permission to make this + // API call. + PutSecret(ctx context.Context, request PutSecret) error +} + +func NewSecretsPreview(client *client.DatabricksClient) *SecretsPreviewAPI { + return &SecretsPreviewAPI{ + secretsPreviewImpl: secretsPreviewImpl{ + client: client, + }, + } +} + +// The Secrets API allows you to manage secrets, secret scopes, and access +// permissions. +// +// Sometimes accessing data requires that you authenticate to external data +// sources through JDBC. Instead of directly entering your credentials into a +// notebook, use Databricks secrets to store your credentials and reference them +// in notebooks and jobs. +// +// Administrators, secret creators, and users granted permission can read +// Databricks secrets. While Databricks makes an effort to redact secret values +// that might be displayed in notebooks, it is not possible to prevent such +// users from reading secrets. +type SecretsPreviewAPI struct { + secretsPreviewImpl +} + +// Delete a secret scope. +// +// Deletes a secret scope. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *SecretsPreviewAPI) DeleteScopeByScope(ctx context.Context, scope string) error { + return a.secretsPreviewImpl.DeleteScope(ctx, DeleteScope{ + Scope: scope, + }) +} + +// Lists ACLs. +// +// List the ACLs for a given secret scope. Users must have the `MANAGE` +// permission to invoke this API. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *SecretsPreviewAPI) ListAclsByScope(ctx context.Context, scope string) (*ListAclsResponse, error) { + return a.secretsPreviewImpl.internalListAcls(ctx, ListAclsRequest{ + Scope: scope, + }) +} + +// List secret keys. +// +// Lists the secret keys that are stored at this scope. This is a metadata-only +// operation; secret data cannot be retrieved using this API. Users need the +// READ permission to make this call. +// +// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *SecretsPreviewAPI) ListSecretsByScope(ctx context.Context, scope string) (*ListSecretsResponse, error) { + return a.secretsPreviewImpl.internalListSecrets(ctx, ListSecretsRequest{ + Scope: scope, + }) +} + +type WorkspacePreviewInterface interface { + + // Delete a workspace object. + // + // Deletes an object or a directory (and optionally recursively deletes all + // objects in the directory). * If `path` does not exist, this call returns an + // error `RESOURCE_DOES_NOT_EXIST`. * If `path` is a non-empty directory and + // `recursive` is set to `false`, this call returns an error + // `DIRECTORY_NOT_EMPTY`. + // + // Object deletion cannot be undone and deleting a directory recursively is not + // atomic. + Delete(ctx context.Context, request Delete) error + + // Export a workspace object. + // + // Exports an object or the contents of an entire directory. + // + // If `path` does not exist, this call returns an error + // `RESOURCE_DOES_NOT_EXIST`. 
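A short sketch of the scope-then-secret flow documented above, reusing ctx and imports from the earlier sketch. The scope, key, and value are placeholders, and the CreateScope/PutSecret field names are assumed to match the v2 workspace models; per the PutSecret doc, exactly one of StringValue or BytesValue may be set:

    secrets, err := workspacepreview.NewSecretsPreviewClient(&config.Config{})
    if err != nil {
    	log.Fatal(err)
    }
    if err := secrets.CreateScope(ctx, workspacepreview.CreateScope{
    	Scope: "my-scope", // placeholder; alphanumerics, dashes, underscores, periods
    }); err != nil {
    	log.Fatal(err)
    }
    if err := secrets.PutSecret(ctx, workspacepreview.PutSecret{
    	Scope:       "my-scope",
    	Key:         "db-password", // placeholder
    	StringValue: "s3cr3t",      // placeholder; use BytesValue for binary material
    }); err != nil {
    	log.Fatal(err)
    }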
+ // + // If the exported data would exceed size limit, this call returns + // `MAX_NOTEBOOK_SIZE_EXCEEDED`. Currently, this API does not support exporting + // a library. + Export(ctx context.Context, request ExportRequest) (*ExportResponse, error) + + // Get workspace object permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevels(ctx context.Context, request GetWorkspaceObjectPermissionLevelsRequest) (*GetWorkspaceObjectPermissionLevelsResponse, error) + + // Get workspace object permission levels. + // + // Gets the permission levels that a user can have on an object. + GetPermissionLevelsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*GetWorkspaceObjectPermissionLevelsResponse, error) + + // Get workspace object permissions. + // + // Gets the permissions of a workspace object. Workspace objects can inherit + // permissions from their parent objects or root object. + GetPermissions(ctx context.Context, request GetWorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) + + // Get workspace object permissions. + // + // Gets the permissions of a workspace object. Workspace objects can inherit + // permissions from their parent objects or root object. + GetPermissionsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*WorkspaceObjectPermissions, error) + + // Get status. + // + // Gets the status of an object or a directory. If `path` does not exist, this + // call returns an error `RESOURCE_DOES_NOT_EXIST`. + GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error) + + // Get status. + // + // Gets the status of an object or a directory. If `path` does not exist, this + // call returns an error `RESOURCE_DOES_NOT_EXIST`. + GetStatusByPath(ctx context.Context, path string) (*ObjectInfo, error) + + // Import a workspace object. + // + // Imports a workspace object (for example, a notebook or file) or the contents + // of an entire directory. If `path` already exists and `overwrite` is set to + // `false`, this call returns an error `RESOURCE_ALREADY_EXISTS`. To import a + // directory, you can use either the `DBC` format or the `SOURCE` format with + // the `language` field unset. To import a single file as `SOURCE`, you must set + // the `language` field. + Import(ctx context.Context, request Import) error + + // List contents. + // + // Lists the contents of a directory, or the object if it is not a directory. If + // the input path does not exist, this call returns an error + // `RESOURCE_DOES_NOT_EXIST`. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] + + // List contents. + // + // Lists the contents of a directory, or the object if it is not a directory. If + // the input path does not exist, this call returns an error + // `RESOURCE_DOES_NOT_EXIST`. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) + + // ObjectInfoPathToObjectIdMap calls [WorkspacePreviewAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. + // + // Returns an error if there's more than one [ObjectInfo] with the same .Path. + // + // Note: All [ObjectInfo] instances are loaded into memory before creating a map. 
+ // + // This method is generated by Databricks SDK Code Generator. + ObjectInfoPathToObjectIdMap(ctx context.Context, request ListWorkspaceRequest) (map[string]int64, error) + + // GetByPath calls [WorkspacePreviewAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. + // + // Returns an error if there's more than one [ObjectInfo] with the same .Path. + // + // Note: All [ObjectInfo] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByPath(ctx context.Context, name string) (*ObjectInfo, error) + + // Create a directory. + // + // Creates the specified directory (and necessary parent directories if they do + // not exist). If there is an object (not a directory) at any prefix of the + // input path, this call returns an error `RESOURCE_ALREADY_EXISTS`. + // + // Note that if this operation fails it may have succeeded in creating some of + // the necessary parent directories. + Mkdirs(ctx context.Context, request Mkdirs) error + + // Create a directory. + // + // Creates the specified directory (and necessary parent directories if they do + // not exist). If there is an object (not a directory) at any prefix of the + // input path, this call returns an error `RESOURCE_ALREADY_EXISTS`. + // + // Note that if this operation fails it may have succeeded in creating some of + // the necessary parent directories. + MkdirsByPath(ctx context.Context, path string) error + + // Set workspace object permissions. + // + // Sets permissions on an object, replacing existing permissions if they exist. + // Deletes all direct permissions if none are specified. Objects can inherit + // permissions from their parent objects or root object. + SetPermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) + + // Update workspace object permissions. + // + // Updates the permissions on a workspace object. Workspace objects can inherit + // permissions from their parent objects or root object. + UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) +} + +func NewWorkspacePreview(client *client.DatabricksClient) *WorkspacePreviewAPI { + return &WorkspacePreviewAPI{ + workspacePreviewImpl: workspacePreviewImpl{ + client: client, + }, + } +} + +// The Workspace API allows you to list, import, export, and delete notebooks +// and folders. +// +// A notebook is a web-based interface to a document that contains runnable +// code, visualizations, and explanatory text. +type WorkspacePreviewAPI struct { + workspacePreviewImpl +} + +// Get workspace object permission levels. +// +// Gets the permission levels that a user can have on an object. +func (a *WorkspacePreviewAPI) GetPermissionLevelsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*GetWorkspaceObjectPermissionLevelsResponse, error) { + return a.workspacePreviewImpl.GetPermissionLevels(ctx, GetWorkspaceObjectPermissionLevelsRequest{ + WorkspaceObjectType: workspaceObjectType, + WorkspaceObjectId: workspaceObjectId, + }) +} + +// Get workspace object permissions. +// +// Gets the permissions of a workspace object. Workspace objects can inherit +// permissions from their parent objects or root object. 
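Because a single-file SOURCE import requires the `language` field (see the Import doc above), here is a sketch of importing a small Python file, adding encoding/base64 to the earlier imports. The path is a placeholder, and the Import fields and the ImportFormatSource/LanguagePython constants are assumed to carry over from the v2 workspace models:

    ws, err := workspacepreview.NewWorkspacePreviewClient(&config.Config{})
    if err != nil {
    	log.Fatal(err)
    }
    // Content is base64-encoded on the wire.
    src := base64.StdEncoding.EncodeToString([]byte("print('hello')"))
    err = ws.Import(ctx, workspacepreview.Import{
    	Path:      "/Users/someone@example.com/hello", // placeholder
    	Content:   src,
    	Format:    workspacepreview.ImportFormatSource,
    	Language:  workspacepreview.LanguagePython,
    	Overwrite: true,
    })
    if err != nil {
    	log.Fatal(err)
    }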
+func (a *WorkspacePreviewAPI) GetPermissionsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*WorkspaceObjectPermissions, error) { + return a.workspacePreviewImpl.GetPermissions(ctx, GetWorkspaceObjectPermissionsRequest{ + WorkspaceObjectType: workspaceObjectType, + WorkspaceObjectId: workspaceObjectId, + }) +} + +// Get status. +// +// Gets the status of an object or a directory. If `path` does not exist, this +// call returns an error `RESOURCE_DOES_NOT_EXIST`. +func (a *WorkspacePreviewAPI) GetStatusByPath(ctx context.Context, path string) (*ObjectInfo, error) { + return a.workspacePreviewImpl.GetStatus(ctx, GetStatusRequest{ + Path: path, + }) +} + +// ObjectInfoPathToObjectIdMap calls [WorkspacePreviewAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. +// +// Returns an error if there's more than one [ObjectInfo] with the same .Path. +// +// Note: All [ObjectInfo] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *WorkspacePreviewAPI) ObjectInfoPathToObjectIdMap(ctx context.Context, request ListWorkspaceRequest) (map[string]int64, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]int64{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.Path + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .Path: %s", key) + } + mapping[key] = v.ObjectId + } + return mapping, nil +} + +// GetByPath calls [WorkspacePreviewAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. +// +// Returns an error if there's more than one [ObjectInfo] with the same .Path. +// +// Note: All [ObjectInfo] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *WorkspacePreviewAPI) GetByPath(ctx context.Context, name string) (*ObjectInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListWorkspaceRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ObjectInfo{} + for _, v := range result { + key := v.Path + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ObjectInfo named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ObjectInfo named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Create a directory. +// +// Creates the specified directory (and necessary parent directories if they do +// not exist). If there is an object (not a directory) at any prefix of the +// input path, this call returns an error `RESOURCE_ALREADY_EXISTS`. +// +// Note that if this operation fails it may have succeeded in creating some of +// the necessary parent directories. +func (a *WorkspacePreviewAPI) MkdirsByPath(ctx context.Context, path string) error { + return a.workspacePreviewImpl.Mkdirs(ctx, Mkdirs{ + Path: path, + }) +} diff --git a/workspace/v2preview/client.go b/workspace/v2preview/client.go new file mode 100755 index 000000000..778d6675a --- /dev/null +++ b/workspace/v2preview/client.go @@ -0,0 +1,147 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
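client.go below follows the same template as every other preview package in this patch: resolve the config, reject account-level configs, then wrap a DatabricksClient. A sketch of the failure mode worth knowing about (the accounts host and token are hypothetical values):

    // These workspace-level constructors reject configs that resolve to the
    // accounts API, so an accounts-console host fails fast.
    _, err := workspacepreview.NewWorkspacePreviewClient(&config.Config{
    	Host:  "https://accounts.cloud.databricks.com", // hypothetical account host
    	Token: "dapi-placeholder",
    })
    if err != nil {
    	log.Println(err) // "invalid configuration: please provide a valid workspace config ..."
    }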
+ +package workspacepreview + +import ( + "errors" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/config" + "github.com/databricks/databricks-sdk-go/databricks/httpclient" +) + +type GitCredentialsPreviewClient struct { + GitCredentialsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewGitCredentialsPreviewClient(cfg *config.Config) (*GitCredentialsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &GitCredentialsPreviewClient{ + Config: cfg, + apiClient: apiClient, + GitCredentialsPreviewInterface: NewGitCredentialsPreview(databricksClient), + }, nil +} + +type ReposPreviewClient struct { + ReposPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewReposPreviewClient(cfg *config.Config) (*ReposPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &ReposPreviewClient{ + Config: cfg, + apiClient: apiClient, + ReposPreviewInterface: NewReposPreview(databricksClient), + }, nil +} + +type SecretsPreviewClient struct { + SecretsPreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewSecretsPreviewClient(cfg *config.Config) (*SecretsPreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &SecretsPreviewClient{ + Config: cfg, + apiClient: apiClient, + SecretsPreviewInterface: NewSecretsPreview(databricksClient), + }, nil +} + +type WorkspacePreviewClient struct { + WorkspacePreviewInterface + Config *config.Config + apiClient *httpclient.ApiClient +} + +func NewWorkspacePreviewClient(cfg *config.Config) (*WorkspacePreviewClient, error) { + if cfg == nil { + cfg = &config.Config{} + } + + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + if cfg.IsAccountClient() { + return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") + } + apiClient, err := cfg.NewApiClient() + if err != nil { + return nil, err + } + databricksClient, err := client.NewWithClient(cfg, apiClient) + if err != nil { + return nil, err + } + + return &WorkspacePreviewClient{ + Config: cfg, + apiClient: apiClient, + 
WorkspacePreviewInterface: NewWorkspacePreview(databricksClient), + }, nil +} diff --git a/workspace/v2preview/impl.go b/workspace/v2preview/impl.go new file mode 100755 index 000000000..1b28cd3dd --- /dev/null +++ b/workspace/v2preview/impl.go @@ -0,0 +1,614 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package workspacepreview + +import ( + "context" + "fmt" + "net/http" + + "github.com/databricks/databricks-sdk-go/databricks/client" + "github.com/databricks/databricks-sdk-go/databricks/listing" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// unexported type that holds implementations of just GitCredentialsPreview API methods +type gitCredentialsPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *gitCredentialsPreviewImpl) Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error) { + var createCredentialsResponse CreateCredentialsResponse + path := "/api/2.0preview/git-credentials" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCredentialsResponse) + return &createCredentialsResponse, err +} + +func (a *gitCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteCredentialsRequest) error { + var deleteCredentialsResponse DeleteCredentialsResponse + path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCredentialsResponse) + return err +} + +func (a *gitCredentialsPreviewImpl) Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error) { + var getCredentialsResponse GetCredentialsResponse + path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsResponse) + return &getCredentialsResponse, err +} + +// Get Git credentials. +// +// Lists the calling user's Git credentials. One credential per user is +// supported. +func (a *gitCredentialsPreviewImpl) List(ctx context.Context) listing.Iterator[CredentialInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListCredentialsResponse) []CredentialInfo { + return resp.Credentials + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get Git credentials. +// +// Lists the calling user's Git credentials. One credential per user is +// supported. 
+func (a *gitCredentialsPreviewImpl) ListAll(ctx context.Context) ([]CredentialInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[CredentialInfo](ctx, iterator) +} +func (a *gitCredentialsPreviewImpl) internalList(ctx context.Context) (*ListCredentialsResponse, error) { + var listCredentialsResponse ListCredentialsResponse + path := "/api/2.0preview/git-credentials" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listCredentialsResponse) + return &listCredentialsResponse, err +} + +func (a *gitCredentialsPreviewImpl) Update(ctx context.Context, request UpdateCredentialsRequest) error { + var updateCredentialsResponse UpdateCredentialsResponse + path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCredentialsResponse) + return err +} + +// unexported type that holds implementations of just ReposPreview API methods +type reposPreviewImpl struct { + client *client.DatabricksClient +} + +func (a *reposPreviewImpl) Create(ctx context.Context, request CreateRepoRequest) (*CreateRepoResponse, error) { + var createRepoResponse CreateRepoResponse + path := "/api/2.0preview/repos" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createRepoResponse) + return &createRepoResponse, err +} + +func (a *reposPreviewImpl) Delete(ctx context.Context, request DeleteRepoRequest) error { + var deleteRepoResponse DeleteRepoResponse + path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteRepoResponse) + return err +} + +func (a *reposPreviewImpl) Get(ctx context.Context, request GetRepoRequest) (*GetRepoResponse, error) { + var getRepoResponse GetRepoResponse + path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRepoResponse) + return &getRepoResponse, err +} + +func (a *reposPreviewImpl) GetPermissionLevels(ctx context.Context, request GetRepoPermissionLevelsRequest) (*GetRepoPermissionLevelsResponse, error) { + var getRepoPermissionLevelsResponse GetRepoPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v/permissionLevels", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRepoPermissionLevelsResponse) + return &getRepoPermissionLevelsResponse, err +} + +func (a *reposPreviewImpl) GetPermissions(ctx context.Context, request GetRepoPermissionsRequest) (*RepoPermissions, error) { + var repoPermissions RepoPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) + queryParams := 
make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &repoPermissions) + return &repoPermissions, err +} + +// Get repos. +// +// Returns repos that the calling user has Manage permissions on. Use +// `next_page_token` to iterate through additional pages. +func (a *reposPreviewImpl) List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] { + + getNextPage := func(ctx context.Context, req ListReposRequest) (*ListReposResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListReposResponse) []RepoInfo { + return resp.Repos + } + getNextReq := func(resp *ListReposResponse) *ListReposRequest { + if resp.NextPageToken == "" { + return nil + } + request.NextPageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get repos. +// +// Returns repos that the calling user has Manage permissions on. Use +// `next_page_token` to iterate through additional pages. +func (a *reposPreviewImpl) ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RepoInfo](ctx, iterator) +} +func (a *reposPreviewImpl) internalList(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { + var listReposResponse ListReposResponse + path := "/api/2.0preview/repos" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listReposResponse) + return &listReposResponse, err +} + +func (a *reposPreviewImpl) SetPermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { + var repoPermissions RepoPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &repoPermissions) + return &repoPermissions, err +} + +func (a *reposPreviewImpl) Update(ctx context.Context, request UpdateRepoRequest) error { + var updateRepoResponse UpdateRepoResponse + path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateRepoResponse) + return err +} + +func (a *reposPreviewImpl) UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { + var repoPermissions RepoPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &repoPermissions) + return &repoPermissions, err +} + +// unexported type that holds implementations of just SecretsPreview API methods +type secretsPreviewImpl struct { + client 
*client.DatabricksClient +} + +func (a *secretsPreviewImpl) CreateScope(ctx context.Context, request CreateScope) error { + var createScopeResponse CreateScopeResponse + path := "/api/2.0preview/secrets/scopes/create" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createScopeResponse) + return err +} + +func (a *secretsPreviewImpl) DeleteAcl(ctx context.Context, request DeleteAcl) error { + var deleteAclResponse DeleteAclResponse + path := "/api/2.0preview/secrets/acls/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteAclResponse) + return err +} + +func (a *secretsPreviewImpl) DeleteScope(ctx context.Context, request DeleteScope) error { + var deleteScopeResponse DeleteScopeResponse + path := "/api/2.0preview/secrets/scopes/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteScopeResponse) + return err +} + +func (a *secretsPreviewImpl) DeleteSecret(ctx context.Context, request DeleteSecret) error { + var deleteSecretResponse DeleteSecretResponse + path := "/api/2.0preview/secrets/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteSecretResponse) + return err +} + +func (a *secretsPreviewImpl) GetAcl(ctx context.Context, request GetAclRequest) (*AclItem, error) { + var aclItem AclItem + path := "/api/2.0preview/secrets/acls/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aclItem) + return &aclItem, err +} + +func (a *secretsPreviewImpl) GetSecret(ctx context.Context, request GetSecretRequest) (*GetSecretResponse, error) { + var getSecretResponse GetSecretResponse + path := "/api/2.0preview/secrets/get" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getSecretResponse) + return &getSecretResponse, err +} + +// Lists ACLs. +// +// List the ACLs for a given secret scope. Users must have the `MANAGE` +// permission to invoke this API. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. 
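GetSecret above returns the secret material base64-encoded. A sketch of reading it back, reusing ctx and the secrets client from the earlier sketch and assuming the response's Value field matches the v2 model:

    resp, err := secrets.GetSecret(ctx, workspacepreview.GetSecretRequest{
    	Scope: "my-scope", // placeholder, as above
    	Key:   "db-password",
    })
    if err != nil {
    	log.Fatal(err)
    }
    raw, err := base64.StdEncoding.DecodeString(resp.Value)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Printf("read %d secret bytes\n", len(raw))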
+func (a *secretsPreviewImpl) ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] { + + getNextPage := func(ctx context.Context, req ListAclsRequest) (*ListAclsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListAcls(ctx, req) + } + getItems := func(resp *ListAclsResponse) []AclItem { + return resp.Items + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Lists ACLs. +// +// List the ACLs for a given secret scope. Users must have the `MANAGE` +// permission to invoke this API. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *secretsPreviewImpl) ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) { + iterator := a.ListAcls(ctx, request) + return listing.ToSlice[AclItem](ctx, iterator) +} +func (a *secretsPreviewImpl) internalListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { + var listAclsResponse ListAclsResponse + path := "/api/2.0preview/secrets/acls/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAclsResponse) + return &listAclsResponse, err +} + +// List all scopes. +// +// Lists all secret scopes available in the workspace. +// +// Throws `PERMISSION_DENIED` if the user does not have permission to make this +// API call. +func (a *secretsPreviewImpl) ListScopes(ctx context.Context) listing.Iterator[SecretScope] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListScopesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListScopes(ctx) + } + getItems := func(resp *ListScopesResponse) []SecretScope { + return resp.Scopes + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List all scopes. +// +// Lists all secret scopes available in the workspace. +// +// Throws `PERMISSION_DENIED` if the user does not have permission to make this +// API call. +func (a *secretsPreviewImpl) ListScopesAll(ctx context.Context) ([]SecretScope, error) { + iterator := a.ListScopes(ctx) + return listing.ToSlice[SecretScope](ctx, iterator) +} +func (a *secretsPreviewImpl) internalListScopes(ctx context.Context) (*ListScopesResponse, error) { + var listScopesResponse ListScopesResponse + path := "/api/2.0preview/secrets/scopes/list" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listScopesResponse) + return &listScopesResponse, err +} + +// List secret keys. +// +// Lists the secret keys that are stored at this scope. This is a metadata-only +// operation; secret data cannot be retrieved using this API. Users need the +// READ permission to make this call. +// +// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. 
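All the List* methods in these preview packages return the same listing.Iterator shape used here. The lazy and eager consumption patterns, shown for secret scopes with the secrets client from the earlier sketch:

    // Lazy: pages are fetched as the iterator advances.
    it := secrets.ListScopes(ctx)
    for it.HasNext(ctx) {
    	scope, err := it.Next(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(scope.Name)
    }

    // Eager: ListScopesAll loads every page into a slice up front.
    all, err := secrets.ListScopesAll(ctx)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(len(all))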
+func (a *secretsPreviewImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] { + + getNextPage := func(ctx context.Context, req ListSecretsRequest) (*ListSecretsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSecrets(ctx, req) + } + getItems := func(resp *ListSecretsResponse) []SecretMetadata { + return resp.Secrets + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List secret keys. +// +// Lists the secret keys that are stored at this scope. This is a metadata-only +// operation; secret data cannot be retrieved using this API. Users need the +// READ permission to make this call. +// +// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *secretsPreviewImpl) ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) { + iterator := a.ListSecrets(ctx, request) + return listing.ToSlice[SecretMetadata](ctx, iterator) +} +func (a *secretsPreviewImpl) internalListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { + var listSecretsResponse ListSecretsResponse + path := "/api/2.0preview/secrets/list" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSecretsResponse) + return &listSecretsResponse, err +} + +func (a *secretsPreviewImpl) PutAcl(ctx context.Context, request PutAcl) error { + var putAclResponse PutAclResponse + path := "/api/2.0preview/secrets/acls/put" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putAclResponse) + return err +} + +func (a *secretsPreviewImpl) PutSecret(ctx context.Context, request PutSecret) error { + var putSecretResponse PutSecretResponse + path := "/api/2.0preview/secrets/put" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putSecretResponse) + return err +} + +// unexported type that holds implementations of just WorkspacePreview API methods +type workspacePreviewImpl struct { + client *client.DatabricksClient +} + +func (a *workspacePreviewImpl) Delete(ctx context.Context, request Delete) error { + var deleteResponse DeleteResponse + path := "/api/2.0preview/workspace/delete" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *workspacePreviewImpl) Export(ctx context.Context, request ExportRequest) (*ExportResponse, error) { + var exportResponse ExportResponse + path := "/api/2.0preview/workspace/export" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, 
queryParams, request, &exportResponse) + return &exportResponse, err +} + +func (a *workspacePreviewImpl) GetPermissionLevels(ctx context.Context, request GetWorkspaceObjectPermissionLevelsRequest) (*GetWorkspaceObjectPermissionLevelsResponse, error) { + var getWorkspaceObjectPermissionLevelsResponse GetWorkspaceObjectPermissionLevelsResponse + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v/permissionLevels", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWorkspaceObjectPermissionLevelsResponse) + return &getWorkspaceObjectPermissionLevelsResponse, err +} + +func (a *workspacePreviewImpl) GetPermissions(ctx context.Context, request GetWorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { + var workspaceObjectPermissions WorkspaceObjectPermissions + path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceObjectPermissions) + return &workspaceObjectPermissions, err +} + +func (a *workspacePreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error) { + var objectInfo ObjectInfo + path := "/api/2.0preview/workspace/get-status" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &objectInfo) + return &objectInfo, err +} + +func (a *workspacePreviewImpl) Import(ctx context.Context, request Import) error { + var importResponse ImportResponse + path := "/api/2.0preview/workspace/import" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &importResponse) + return err +} + +// List contents. +// +// Lists the contents of a directory, or the object if it is not a directory. If +// the input path does not exist, this call returns an error +// `RESOURCE_DOES_NOT_EXIST`. +func (a *workspacePreviewImpl) List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] { + + getNextPage := func(ctx context.Context, req ListWorkspaceRequest) (*ListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListResponse) []ObjectInfo { + return resp.Objects + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List contents. +// +// Lists the contents of a directory, or the object if it is not a directory. If +// the input path does not exist, this call returns an error +// `RESOURCE_DOES_NOT_EXIST`. 
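Rounding out the workspace surface, a sketch pairing GetStatus with Export, reusing ws and ctx from the import sketch. The ExportFormatSource constant and the ObjectInfo/ExportResponse field names are assumed from the v2 models, and Content comes back base64-encoded:

    info, err := ws.GetStatus(ctx, workspacepreview.GetStatusRequest{
    	Path: "/Users/someone@example.com/hello", // placeholder, as above
    })
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(info.ObjectType, info.ObjectId)

    exported, err := ws.Export(ctx, workspacepreview.ExportRequest{
    	Path:   "/Users/someone@example.com/hello",
    	Format: workspacepreview.ExportFormatSource,
    })
    if err != nil {
    	log.Fatal(err)
    }
    code, _ := base64.StdEncoding.DecodeString(exported.Content)
    fmt.Println(string(code))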
+func (a *workspacePreviewImpl) ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) {
+	iterator := a.List(ctx, request)
+	return listing.ToSlice[ObjectInfo](ctx, iterator)
+}
+func (a *workspacePreviewImpl) internalList(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) {
+	var listResponse ListResponse
+	path := "/api/2.0preview/workspace/list"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listResponse)
+	return &listResponse, err
+}
+
+func (a *workspacePreviewImpl) Mkdirs(ctx context.Context, request Mkdirs) error {
+	var mkdirsResponse MkdirsResponse
+	path := "/api/2.0preview/workspace/mkdirs"
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &mkdirsResponse)
+	return err
+}
+
+func (a *workspacePreviewImpl) SetPermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) {
+	var workspaceObjectPermissions WorkspaceObjectPermissions
+	path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &workspaceObjectPermissions)
+	return &workspaceObjectPermissions, err
+}
+
+func (a *workspacePreviewImpl) UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) {
+	var workspaceObjectPermissions WorkspaceObjectPermissions
+	path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId)
+	queryParams := make(map[string]any)
+	headers := make(map[string]string)
+	headers["Accept"] = "application/json"
+	headers["Content-Type"] = "application/json"
+	err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &workspaceObjectPermissions)
+	return &workspaceObjectPermissions, err
+}
diff --git a/workspace/v2preview/model.go b/workspace/v2preview/model.go
new file mode 100755
index 000000000..7b3ed1778
--- /dev/null
+++ b/workspace/v2preview/model.go
@@ -0,0 +1,1301 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package workspacepreview
+
+import (
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/databricks/marshal"
+)
+
+type AclItem struct {
+	// The permission level applied to the principal.
+	Permission AclPermission `json:"permission"`
+	// The principal to which the permission is applied.
+	Principal string `json:"principal"`
+}
+
+type AclPermission string
+
+const AclPermissionManage AclPermission = `MANAGE`
+
+const AclPermissionRead AclPermission = `READ`
+
+const AclPermissionWrite AclPermission = `WRITE`
+
+// String representation for [fmt.Print]
+func (f *AclPermission) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AclPermission) Set(v string) error {
+	switch v {
+	case `MANAGE`, `READ`, `WRITE`:
+		*f = AclPermission(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "MANAGE", "READ", "WRITE"`, v)
+	}
+}
+
+// Type always returns AclPermission to satisfy [pflag.Value] interface
+func (f *AclPermission) Type() string {
+	return "AclPermission"
+}
+
+type AzureKeyVaultSecretScopeMetadata struct {
+	// The DNS name of the KeyVault.
+	DnsName string `json:"dns_name"`
+	// The resource ID of the Azure KeyVault that the user wants to associate
+	// the scope with.
+	ResourceId string `json:"resource_id"`
+}
+
+type CreateCredentialsRequest struct {
+	// Git provider. This field is case-insensitive. The available Git
+	// providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,
+	// `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and
+	// `awsCodeCommit`.
+	GitProvider string `json:"git_provider"`
+	// The username or email provided with your Git provider account, depending
+	// on which provider you are using. For GitHub, GitHub Enterprise Server,
+	// or Azure DevOps Services, either email or username may be used. For
+	// GitLab, GitLab Enterprise Edition, email must be used. For AWS
+	// CodeCommit, BitBucket or BitBucket Server, username must be used. For
+	// all other providers, please see your provider's Personal Access Token
+	// authentication documentation to see what is supported.
+	GitUsername string `json:"git_username,omitempty"`
+	// The personal access token used to authenticate to the corresponding Git
+	// provider. For certain providers, support may exist for other types of
+	// scoped access tokens. [Learn more].
+	//
+	// [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html
+	PersonalAccessToken string `json:"personal_access_token,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateCredentialsRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateCredentialsRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateCredentialsResponse struct {
+	// ID of the credential object in the workspace.
+	CredentialId int64 `json:"credential_id"`
+	// The Git provider associated with the credential.
+	GitProvider string `json:"git_provider"`
+	// The username or email provided with your Git provider account and
+	// associated with the credential.
+	GitUsername string `json:"git_username,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateCredentialsResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateCredentialsResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
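+
+// A hypothetical sketch of filling in CreateCredentialsRequest (the username
+// and token below are placeholders, not real values; field semantics follow
+// the comments above):
+//
+//	req := CreateCredentialsRequest{
+//		GitProvider:         "gitHub",
+//		GitUsername:         "someone@example.com",
+//		PersonalAccessToken: "<personal-access-token>",
+//	}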
+
+type CreateRepoRequest struct {
+	// Desired path for the repo in the workspace. Almost any path in the
+	// workspace can be chosen. If the repo is created in `/Repos`, the path
+	// must be in the format `/Repos/{folder}/{repo-name}`.
+	Path string `json:"path,omitempty"`
+	// Git provider. This field is case-insensitive. The available Git
+	// providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,
+	// `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and
+	// `awsCodeCommit`.
+	Provider string `json:"provider"`
+	// If specified, the repo will be created with sparse checkout enabled. You
+	// cannot enable/disable sparse checkout after the repo is created.
+	SparseCheckout *SparseCheckout `json:"sparse_checkout,omitempty"`
+	// URL of the Git repository to be linked.
+	Url string `json:"url"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateRepoRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateRepoRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateRepoResponse struct {
+	// Branch that the Git folder (repo) is checked out to.
+	Branch string `json:"branch,omitempty"`
+	// SHA-1 hash representing the commit ID of the current HEAD of the Git
+	// folder (repo).
+	HeadCommitId string `json:"head_commit_id,omitempty"`
+	// ID of the Git folder (repo) object in the workspace.
+	Id int64 `json:"id,omitempty"`
+	// Path of the Git folder (repo) in the workspace.
+	Path string `json:"path,omitempty"`
+	// Git provider of the linked Git repository.
+	Provider string `json:"provider,omitempty"`
+	// Sparse checkout settings for the Git folder (repo).
+	SparseCheckout *SparseCheckout `json:"sparse_checkout,omitempty"`
+	// URL of the linked Git repository.
+	Url string `json:"url,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateRepoResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateRepoResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateScope struct {
+	// The metadata for the secret scope if the type is `AZURE_KEYVAULT`.
+	BackendAzureKeyvault *AzureKeyVaultSecretScopeMetadata `json:"backend_azure_keyvault,omitempty"`
+	// The principal that is initially granted `MANAGE` permission to the
+	// created scope.
+	InitialManagePrincipal string `json:"initial_manage_principal,omitempty"`
+	// Scope name requested by the user. Scope names are unique.
+	Scope string `json:"scope"`
+	// The backend type the scope will be created with. If not specified, it
+	// will default to `DATABRICKS`.
+	ScopeBackendType ScopeBackendType `json:"scope_backend_type,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CreateScope) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateScope) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type CreateScopeResponse struct {
+}
+
+type CredentialInfo struct {
+	// ID of the credential object in the workspace.
+	CredentialId int64 `json:"credential_id"`
+	// The Git provider associated with the credential.
+	GitProvider string `json:"git_provider,omitempty"`
+	// The username or email provided with your Git provider account and
+	// associated with the credential.
+	GitUsername string `json:"git_username,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *CredentialInfo) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CredentialInfo) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type Delete struct {
+	// The absolute path of the notebook or directory.
+	Path string `json:"path"`
+	// The flag that specifies whether to delete the object recursively. It is
+	// `false` by default. Please note that deleting a directory is not atomic:
+	// if it fails in the middle, some of the objects under this directory may
+	// be deleted, and the deletion cannot be undone.
+	Recursive bool `json:"recursive,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Delete) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Delete) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type DeleteAcl struct {
+	// The principal to remove an existing ACL from.
+	Principal string `json:"principal"`
+	// The name of the scope to remove permissions from.
+	Scope string `json:"scope"`
+}
+
+type DeleteAclResponse struct {
+}
+
+// Delete a credential
+type DeleteCredentialsRequest struct {
+	// The ID for the corresponding credential to access.
+	CredentialId int64 `json:"-" url:"-"`
+}
+
+type DeleteCredentialsResponse struct {
+}
+
+// Delete a repo
+type DeleteRepoRequest struct {
+	// The ID for the corresponding repo to delete.
+	RepoId int64 `json:"-" url:"-"`
+}
+
+type DeleteRepoResponse struct {
+}
+
+type DeleteResponse struct {
+}
+
+type DeleteScope struct {
+	// Name of the scope to delete.
+	Scope string `json:"scope"`
+}
+
+type DeleteScopeResponse struct {
+}
+
+type DeleteSecret struct {
+	// Name of the secret to delete.
+	Key string `json:"key"`
+	// The name of the scope that contains the secret to delete.
+	Scope string `json:"scope"`
+}
+
+type DeleteSecretResponse struct {
+}
+
+type ExportFormat string
+
+const ExportFormatAuto ExportFormat = `AUTO`
+
+const ExportFormatDbc ExportFormat = `DBC`
+
+const ExportFormatHtml ExportFormat = `HTML`
+
+const ExportFormatJupyter ExportFormat = `JUPYTER`
+
+const ExportFormatRMarkdown ExportFormat = `R_MARKDOWN`
+
+const ExportFormatSource ExportFormat = `SOURCE`
+
+// String representation for [fmt.Print]
+func (f *ExportFormat) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ExportFormat) Set(v string) error {
+	switch v {
+	case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `R_MARKDOWN`, `SOURCE`:
+		*f = ExportFormat(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "R_MARKDOWN", "SOURCE"`, v)
+	}
+}
+
+// Type always returns ExportFormat to satisfy [pflag.Value] interface
+func (f *ExportFormat) Type() string {
+	return "ExportFormat"
+}
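+
+// The String/Set/Type trio above implements the [pflag.Value] contract, so
+// the enum can back a CLI flag with validation. A small hypothetical sketch
+// of the behavior:
+//
+//	var f ExportFormat
+//	_ = f.Set("SOURCE") // accepted; f now prints as "SOURCE"
+//	err := f.Set("PDF") // rejected: value "PDF" is not one of "AUTO", ...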
+
+// Export a workspace object
+type ExportRequest struct {
+	// Flag to enable direct download. If it is `true`, the response is the
+	// exported file itself. Otherwise, by default, the response contains
+	// content in the form of a base64 encoded string.
+	DirectDownload bool `json:"-" url:"direct_download,omitempty"`
+	// This specifies the format of the exported file. By default, this is
+	// `SOURCE`.
+	//
+	// The value is case sensitive.
+	//
+	// - `SOURCE`: The notebook is exported as source code. Directory exports
+	// will not include non-notebook entries. - `HTML`: The notebook is exported
+	// as an HTML file. - `JUPYTER`: The notebook is exported as a
+	// Jupyter/IPython Notebook file. - `DBC`: The notebook is exported in
+	// Databricks archive format. Directory exports will not include
+	// non-notebook entries. - `R_MARKDOWN`: The notebook is exported to R
+	// Markdown format. - `AUTO`: The object or directory is exported depending
+	// on the object's type. Directory exports will include notebooks and
+	// workspace files.
+	Format ExportFormat `json:"-" url:"format,omitempty"`
+	// The absolute path of the object or directory. Exporting a directory is
+	// only supported for the `DBC`, `SOURCE`, and `AUTO` formats.
+	Path string `json:"-" url:"path"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ExportRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ExportRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ExportResponse struct {
+	// The base64-encoded content. If the limit (10MB) is exceeded, an
+	// exception with the error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown.
+	Content string `json:"content,omitempty"`
+	// The file type of the exported file.
+	FileType string `json:"file_type,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ExportResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ExportResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get secret ACL details
+type GetAclRequest struct {
+	// The principal to fetch ACL information for.
+	Principal string `json:"-" url:"principal"`
+	// The name of the scope to fetch ACL information from.
+	Scope string `json:"-" url:"scope"`
+}
+
+// Get a credential entry
+type GetCredentialsRequest struct {
+	// The ID for the corresponding credential to access.
+	CredentialId int64 `json:"-" url:"-"`
+}
+
+type GetCredentialsResponse struct {
+	// ID of the credential object in the workspace.
+	CredentialId int64 `json:"credential_id"`
+	// The Git provider associated with the credential.
+	GitProvider string `json:"git_provider,omitempty"`
+	// The username or email provided with your Git provider account and
+	// associated with the credential.
+	GitUsername string `json:"git_username,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GetCredentialsResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GetCredentialsResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get repo permission levels
+type GetRepoPermissionLevelsRequest struct {
+	// The repo for which to get or manage permissions.
+	RepoId string `json:"-" url:"-"`
+}
+
+type GetRepoPermissionLevelsResponse struct {
+	// Specific permission levels
+	PermissionLevels []RepoPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get repo permissions
+type GetRepoPermissionsRequest struct {
+	// The repo for which to get or manage permissions.
+	RepoId string `json:"-" url:"-"`
+}
+
+// Get a repo
+type GetRepoRequest struct {
+	// ID of the Git folder (repo) object in the workspace.
+	RepoId int64 `json:"-" url:"-"`
+}
+
+type GetRepoResponse struct {
+	// Branch that the local version of the repo is checked out to.
+	Branch string `json:"branch,omitempty"`
+	// SHA-1 hash representing the commit ID of the current HEAD of the repo.
+	HeadCommitId string `json:"head_commit_id,omitempty"`
+	// ID of the Git folder (repo) object in the workspace.
+	Id int64 `json:"id,omitempty"`
+	// Path of the Git folder (repo) in the workspace.
+	Path string `json:"path,omitempty"`
+	// Git provider of the linked Git repository.
+	Provider string `json:"provider,omitempty"`
+	// Sparse checkout settings for the Git folder (repo).
+	SparseCheckout *SparseCheckout `json:"sparse_checkout,omitempty"`
+	// URL of the linked Git repository.
+	Url string `json:"url,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GetRepoResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GetRepoResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get a secret
+type GetSecretRequest struct {
+	// The key to fetch the secret for.
+	Key string `json:"-" url:"key"`
+	// The name of the scope to fetch secret information from.
+	Scope string `json:"-" url:"scope"`
+}
+
+type GetSecretResponse struct {
+	// A unique name to identify the secret.
+	Key string `json:"key,omitempty"`
+	// The value of the secret in its byte representation.
+	Value string `json:"value,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *GetSecretResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s GetSecretResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Get status
+type GetStatusRequest struct {
+	// The absolute path of the notebook or directory.
+	Path string `json:"-" url:"path"`
+}
+
+// Get workspace object permission levels
+type GetWorkspaceObjectPermissionLevelsRequest struct {
+	// The workspace object for which to get or manage permissions.
+	WorkspaceObjectId string `json:"-" url:"-"`
+	// The workspace object type for which to get or manage permissions.
+	WorkspaceObjectType string `json:"-" url:"-"`
+}
+
+type GetWorkspaceObjectPermissionLevelsResponse struct {
+	// Specific permission levels
+	PermissionLevels []WorkspaceObjectPermissionsDescription `json:"permission_levels,omitempty"`
+}
+
+// Get workspace object permissions
+type GetWorkspaceObjectPermissionsRequest struct {
+	// The workspace object for which to get or manage permissions.
+	WorkspaceObjectId string `json:"-" url:"-"`
+	// The workspace object type for which to get or manage permissions.
+	WorkspaceObjectType string `json:"-" url:"-"`
+}
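+
+// A hedged sketch of building an import request for the Import struct that
+// follows (the path and source are placeholders; the Content field carries
+// base64-encoded data, per its documentation):
+//
+//	src := base64.StdEncoding.EncodeToString([]byte("print('hello')"))
+//	req := Import{
+//		Path:     "/Users/someone@example.com/hello",
+//		Format:   ImportFormatSource,
+//		Language: LanguagePython,
+//		Content:  src,
+//	}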
+
+type Import struct {
+	// The base64-encoded content. This has a limit of 10 MB.
+	//
+	// If the limit (10MB) is exceeded, an exception with the error code
+	// **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. This parameter might be
+	// absent, and instead a posted file is used.
+	Content string `json:"content,omitempty"`
+	// This specifies the format of the file to be imported.
+	//
+	// The value is case sensitive.
+	//
+	// - `AUTO`: The item is imported depending on an analysis of the item's
+	// extension and the header content provided in the request. If the item is
+	// imported as a notebook, then the item's extension is automatically
+	// removed. - `SOURCE`: The notebook or directory is imported as source
+	// code. - `HTML`: The notebook is imported as an HTML file. - `JUPYTER`:
+	// The notebook is imported as a Jupyter/IPython Notebook file. - `DBC`: The
+	// notebook is imported in Databricks archive format. Required for
+	// directories. - `R_MARKDOWN`: The notebook is imported from R Markdown
+	// format.
+	Format ImportFormat `json:"format,omitempty"`
+	// The language of the object. This value is set only if the object type is
+	// `NOTEBOOK`.
+	Language Language `json:"language,omitempty"`
+	// The flag that specifies whether to overwrite an existing object. It is
+	// `false` by default. For the `DBC` format, `overwrite` is not supported
+	// since a `DBC` archive may contain a directory.
+	Overwrite bool `json:"overwrite,omitempty"`
+	// The absolute path of the object or directory. Importing a directory is
+	// only supported for the `DBC` and `SOURCE` formats.
+	Path string `json:"path"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *Import) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Import) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// This specifies the format of the file to be imported.
+//
+// The value is case sensitive.
+//
+// - `AUTO`: The item is imported depending on an analysis of the item's
+// extension and the header content provided in the request. If the item is
+// imported as a notebook, then the item's extension is automatically removed. -
+// `SOURCE`: The notebook or directory is imported as source code. - `HTML`: The
+// notebook is imported as an HTML file. - `JUPYTER`: The notebook is imported
+// as a Jupyter/IPython Notebook file. - `DBC`: The notebook is imported in
+// Databricks archive format. Required for directories. - `R_MARKDOWN`: The
+// notebook is imported from R Markdown format.
+type ImportFormat string
+
+// The item is imported depending on an analysis of the item's extension and
+// the header content provided in the request. If the item is imported as a
+// notebook, then the item's extension is automatically removed.
+const ImportFormatAuto ImportFormat = `AUTO`
+
+// The notebook is imported in Databricks archive format. Required for
+// directories.
+const ImportFormatDbc ImportFormat = `DBC`
+
+// The notebook is imported as an HTML file.
+const ImportFormatHtml ImportFormat = `HTML`
+
+// The notebook is imported as a Jupyter/IPython Notebook file.
+const ImportFormatJupyter ImportFormat = `JUPYTER`
+
+const ImportFormatRaw ImportFormat = `RAW`
+
+// The notebook is imported from R Markdown format.
+const ImportFormatRMarkdown ImportFormat = `R_MARKDOWN`
+
+// The notebook or directory is imported as source code.
+const ImportFormatSource ImportFormat = `SOURCE`
+
+// String representation for [fmt.Print]
+func (f *ImportFormat) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ImportFormat) Set(v string) error {
+	switch v {
+	case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `RAW`, `R_MARKDOWN`, `SOURCE`:
+		*f = ImportFormat(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "RAW", "R_MARKDOWN", "SOURCE"`, v)
+	}
+}
+
+// Type always returns ImportFormat to satisfy [pflag.Value] interface
+func (f *ImportFormat) Type() string {
+	return "ImportFormat"
+}
+
+type ImportResponse struct {
+}
+
+// The language of the object. This value is set only if the object type is
+// `NOTEBOOK`.
+type Language string
+
+const LanguagePython Language = `PYTHON`
+
+const LanguageR Language = `R`
+
+const LanguageScala Language = `SCALA`
+
+const LanguageSql Language = `SQL`
+
+// String representation for [fmt.Print]
+func (f *Language) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Language) Set(v string) error {
+	switch v {
+	case `PYTHON`, `R`, `SCALA`, `SQL`:
+		*f = Language(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "PYTHON", "R", "SCALA", "SQL"`, v)
+	}
+}
+
+// Type always returns Language to satisfy [pflag.Value] interface
+func (f *Language) Type() string {
+	return "Language"
+}
+
+// Lists ACLs
+type ListAclsRequest struct {
+	// The name of the scope to fetch ACL information from.
+	Scope string `json:"-" url:"scope"`
+}
+
+type ListAclsResponse struct {
+	// The associated ACLs rule applied to principals in the given scope.
+	Items []AclItem `json:"items,omitempty"`
+}
+
+type ListCredentialsResponse struct {
+	// List of credentials.
+	Credentials []CredentialInfo `json:"credentials,omitempty"`
+}
+
+// Get repos
+type ListReposRequest struct {
+	// Token used to get the next page of results. If not specified, returns
+	// the first page of results as well as a next page token if there are more
+	// results.
+	NextPageToken string `json:"-" url:"next_page_token,omitempty"`
+	// Filters repos that have paths starting with the given path prefix. If
+	// not provided, or if the prefix is effectively empty (`/` or
+	// `/Workspace`), Git folders (repos) from `/Workspace/Repos` will be
+	// served.
+	PathPrefix string `json:"-" url:"path_prefix,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListReposRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListReposRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListReposResponse struct {
+	// Token that can be specified as a query parameter to the `GET /repos`
+	// endpoint to retrieve the next page of results.
+	NextPageToken string `json:"next_page_token,omitempty"`
+	// List of Git folders (repos).
+	Repos []RepoInfo `json:"repos,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListReposResponse) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListReposResponse) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListResponse struct {
+	// List of objects.
+	Objects []ObjectInfo `json:"objects,omitempty"`
+}
+
+type ListScopesResponse struct {
+	// The available secret scopes.
+	Scopes []SecretScope `json:"scopes,omitempty"`
+}
+
+// List secret keys
+type ListSecretsRequest struct {
+	// The name of the scope to list secrets within.
+	Scope string `json:"-" url:"scope"`
+}
+
+type ListSecretsResponse struct {
+	// Metadata information of all secrets contained within the given scope.
+	Secrets []SecretMetadata `json:"secrets,omitempty"`
+}
+
+// List contents
+type ListWorkspaceRequest struct {
+	// UTC timestamp in milliseconds
+	NotebooksModifiedAfter int `json:"-" url:"notebooks_modified_after,omitempty"`
+	// The absolute path of the notebook or directory.
+	Path string `json:"-" url:"path"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ListWorkspaceRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListWorkspaceRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type Mkdirs struct {
+	// The absolute path of the directory. If the parent directories do not
+	// exist, they will also be created. If the directory already exists, this
+	// command will do nothing and succeed.
+	Path string `json:"path"`
+}
+
+type MkdirsResponse struct {
+}
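+
+// The notebooks_modified_after filter above is a UTC timestamp in
+// milliseconds; a hypothetical sketch of deriving it from a time.Time:
+//
+//	cutoff := int(time.Now().Add(-24 * time.Hour).UnixMilli())
+//	req := ListWorkspaceRequest{Path: "/Users/someone", NotebooksModifiedAfter: cutoff}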
+
+type ObjectInfo struct {
+	// Only applicable to files. The creation UTC timestamp.
+	CreatedAt int64 `json:"created_at,omitempty"`
+	// The language of the object. This value is set only if the object type is
+	// `NOTEBOOK`.
+	Language Language `json:"language,omitempty"`
+	// Only applicable to files, the last modified UTC timestamp.
+	ModifiedAt int64 `json:"modified_at,omitempty"`
+	// Unique identifier for the object.
+	ObjectId int64 `json:"object_id,omitempty"`
+	// The type of the object in workspace.
+	//
+	// - `NOTEBOOK`: document that contains runnable code, visualizations, and
+	// explanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`:
+	// file - `REPO`: repository - `DASHBOARD`: Lakeview dashboard
+	ObjectType ObjectType `json:"object_type,omitempty"`
+	// The absolute path of the object.
+	Path string `json:"path,omitempty"`
+	// A unique identifier for the object that is consistent across all
+	// Databricks APIs.
+	ResourceId string `json:"resource_id,omitempty"`
+	// Only applicable to files. The file size in bytes, when it can be
+	// returned.
+	Size int64 `json:"size,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectInfo) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ObjectInfo) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// The type of the object in workspace.
+//
+// - `NOTEBOOK`: document that contains runnable code, visualizations, and
+// explanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`:
+// file - `REPO`: repository - `DASHBOARD`: Lakeview dashboard
+type ObjectType string
+
+// Lakeview dashboard
+const ObjectTypeDashboard ObjectType = `DASHBOARD`
+
+// directory
+const ObjectTypeDirectory ObjectType = `DIRECTORY`
+
+// file
+const ObjectTypeFile ObjectType = `FILE`
+
+// library
+const ObjectTypeLibrary ObjectType = `LIBRARY`
+
+// document that contains runnable code, visualizations, and explanatory text.
+const ObjectTypeNotebook ObjectType = `NOTEBOOK`
+
+// repository
+const ObjectTypeRepo ObjectType = `REPO`
+
+// String representation for [fmt.Print]
+func (f *ObjectType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ObjectType) Set(v string) error {
+	switch v {
+	case `DASHBOARD`, `DIRECTORY`, `FILE`, `LIBRARY`, `NOTEBOOK`, `REPO`:
+		*f = ObjectType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DASHBOARD", "DIRECTORY", "FILE", "LIBRARY", "NOTEBOOK", "REPO"`, v)
+	}
+}
+
+// Type always returns ObjectType to satisfy [pflag.Value] interface
+func (f *ObjectType) Type() string {
+	return "ObjectType"
+}
+
+type PutAcl struct {
+	// The permission level applied to the principal.
+	Permission AclPermission `json:"permission"`
+	// The principal to which the permission is applied.
+	Principal string `json:"principal"`
+	// The name of the scope to apply permissions to.
+	Scope string `json:"scope"`
+}
+
+type PutAclResponse struct {
+}
+
+type PutSecret struct {
+	// If specified, the value will be stored as bytes.
+	BytesValue string `json:"bytes_value,omitempty"`
+	// A unique name to identify the secret.
+	Key string `json:"key"`
+	// The name of the scope with which the secret will be associated.
+	Scope string `json:"scope"`
+	// If specified, note that the value will be stored in UTF-8 (MB4) form.
+ StringValue string `json:"string_value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *PutSecret) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PutSecret) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PutSecretResponse struct { +} + +type RepoAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel RepoPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RepoAccessControlResponse struct { + // All permissions. + AllPermissions []RepoPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Git folder (repo) information. +type RepoInfo struct { + // Name of the current git branch of the git folder (repo). + Branch string `json:"branch,omitempty"` + // Current git commit id of the git folder (repo). + HeadCommitId string `json:"head_commit_id,omitempty"` + // Id of the git folder (repo) in the Workspace. + Id int64 `json:"id,omitempty"` + // Root path of the git folder (repo) in the Workspace. + Path string `json:"path,omitempty"` + // Git provider of the remote git repository, e.g. `gitHub`. + Provider string `json:"provider,omitempty"` + // Sparse checkout config for the git folder (repo). + SparseCheckout *SparseCheckout `json:"sparse_checkout,omitempty"` + // URL of the remote git repository. 
+ Url string `json:"url,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RepoPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel RepoPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type RepoPermissionLevel string + +const RepoPermissionLevelCanEdit RepoPermissionLevel = `CAN_EDIT` + +const RepoPermissionLevelCanManage RepoPermissionLevel = `CAN_MANAGE` + +const RepoPermissionLevelCanRead RepoPermissionLevel = `CAN_READ` + +const RepoPermissionLevelCanRun RepoPermissionLevel = `CAN_RUN` + +// String representation for [fmt.Print] +func (f *RepoPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RepoPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`, `CAN_RUN`: + *f = RepoPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ", "CAN_RUN"`, v) + } +} + +// Type always returns RepoPermissionLevel to satisfy [pflag.Value] interface +func (f *RepoPermissionLevel) Type() string { + return "RepoPermissionLevel" +} + +type RepoPermissions struct { + AccessControlList []RepoAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RepoPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel RepoPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *RepoPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RepoPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type RepoPermissionsRequest struct { + AccessControlList []RepoAccessControlRequest `json:"access_control_list,omitempty"` + // The repo for which to get or manage permissions. 
+	RepoId string `json:"-" url:"-"`
+}
+
+type ScopeBackendType string
+
+const ScopeBackendTypeAzureKeyvault ScopeBackendType = `AZURE_KEYVAULT`
+
+const ScopeBackendTypeDatabricks ScopeBackendType = `DATABRICKS`
+
+// String representation for [fmt.Print]
+func (f *ScopeBackendType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ScopeBackendType) Set(v string) error {
+	switch v {
+	case `AZURE_KEYVAULT`, `DATABRICKS`:
+		*f = ScopeBackendType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AZURE_KEYVAULT", "DATABRICKS"`, v)
+	}
+}
+
+// Type always returns ScopeBackendType to satisfy [pflag.Value] interface
+func (f *ScopeBackendType) Type() string {
+	return "ScopeBackendType"
+}
+
+type SecretMetadata struct {
+	// A unique name to identify the secret.
+	Key string `json:"key,omitempty"`
+	// The last updated timestamp (in milliseconds) for the secret.
+	LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *SecretMetadata) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SecretMetadata) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type SecretScope struct {
+	// The type of secret scope backend.
+	BackendType ScopeBackendType `json:"backend_type,omitempty"`
+	// The metadata for the secret scope if the type is `AZURE_KEYVAULT`.
+	KeyvaultMetadata *AzureKeyVaultSecretScopeMetadata `json:"keyvault_metadata,omitempty"`
+	// A unique name to identify the secret scope.
+	Name string `json:"name,omitempty"`
+
+	ForceSendFields []string `json:"-"`
+}
+
+func (s *SecretScope) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SecretScope) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+// Sparse checkout configuration; it contains options such as cone patterns.
+type SparseCheckout struct {
+	// List of sparse checkout cone patterns, see [cone mode handling] for
+	// details.
+	//
+	// [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling
+	Patterns []string `json:"patterns,omitempty"`
+}
+
+// Sparse checkout configuration; it contains options such as cone patterns.
+type SparseCheckoutUpdate struct {
+	// List of sparse checkout cone patterns, see [cone mode handling] for
+	// details.
+	//
+	// [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling
+	Patterns []string `json:"patterns,omitempty"`
+}
+
+type UpdateCredentialsRequest struct {
+	// The ID for the corresponding credential to access.
+	CredentialId int64 `json:"-" url:"-"`
+	// Git provider. This field is case-insensitive. The available Git
+	// providers are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,
+	// `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and
+	// `awsCodeCommit`.
+	GitProvider string `json:"git_provider"`
+	// The username or email provided with your Git provider account, depending
+	// on which provider you are using. For GitHub, GitHub Enterprise Server,
+	// or Azure DevOps Services, either email or username may be used. For
+	// GitLab, GitLab Enterprise Edition, email must be used. For AWS
+	// CodeCommit, BitBucket or BitBucket Server, username must be used. For
+	// all other providers, please see your provider's Personal Access Token
+	// authentication documentation to see what is supported.
+ GitUsername string `json:"git_username,omitempty"` + // The personal access token used to authenticate to the corresponding Git + // provider. For certain providers, support may exist for other types of + // scoped access tokens. [Learn more]. + // + // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html + PersonalAccessToken string `json:"personal_access_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateCredentialsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateCredentialsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateCredentialsResponse struct { +} + +type UpdateRepoRequest struct { + // Branch that the local version of the repo is checked out to. + Branch string `json:"branch,omitempty"` + // ID of the Git folder (repo) object in the workspace. + RepoId int64 `json:"-" url:"-"` + // If specified, update the sparse checkout settings. The update will fail + // if sparse checkout is not enabled for the repo. + SparseCheckout *SparseCheckoutUpdate `json:"sparse_checkout,omitempty"` + // Tag that the local version of the repo is checked out to. Updating the + // repo to a tag puts the repo in a detached HEAD state. Before committing + // new changes, you must update the repo to a branch instead of the detached + // HEAD. + Tag string `json:"tag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateRepoRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateRepoRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type UpdateRepoResponse struct { +} + +type WorkspaceObjectAccessControlRequest struct { + // name of the group + GroupName string `json:"group_name,omitempty"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `json:"permission_level,omitempty"` + // application ID of a service principal + ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceObjectAccessControlRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceObjectAccessControlRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WorkspaceObjectAccessControlResponse struct { + // All permissions. + AllPermissions []WorkspaceObjectPermission `json:"all_permissions,omitempty"` + // Display name of the user or service principal. + DisplayName string `json:"display_name,omitempty"` + // name of the group + GroupName string `json:"group_name,omitempty"` + // Name of the service principal. 
+ ServicePrincipalName string `json:"service_principal_name,omitempty"` + // name of the user + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceObjectAccessControlResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceObjectAccessControlResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WorkspaceObjectPermission struct { + Inherited bool `json:"inherited,omitempty"` + + InheritedFromObject []string `json:"inherited_from_object,omitempty"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceObjectPermission) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceObjectPermission) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Permission level +type WorkspaceObjectPermissionLevel string + +const WorkspaceObjectPermissionLevelCanEdit WorkspaceObjectPermissionLevel = `CAN_EDIT` + +const WorkspaceObjectPermissionLevelCanManage WorkspaceObjectPermissionLevel = `CAN_MANAGE` + +const WorkspaceObjectPermissionLevelCanRead WorkspaceObjectPermissionLevel = `CAN_READ` + +const WorkspaceObjectPermissionLevelCanRun WorkspaceObjectPermissionLevel = `CAN_RUN` + +// String representation for [fmt.Print] +func (f *WorkspaceObjectPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspaceObjectPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`, `CAN_RUN`: + *f = WorkspaceObjectPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ", "CAN_RUN"`, v) + } +} + +// Type always returns WorkspaceObjectPermissionLevel to satisfy [pflag.Value] interface +func (f *WorkspaceObjectPermissionLevel) Type() string { + return "WorkspaceObjectPermissionLevel" +} + +type WorkspaceObjectPermissions struct { + AccessControlList []WorkspaceObjectAccessControlResponse `json:"access_control_list,omitempty"` + + ObjectId string `json:"object_id,omitempty"` + + ObjectType string `json:"object_type,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceObjectPermissions) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceObjectPermissions) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WorkspaceObjectPermissionsDescription struct { + Description string `json:"description,omitempty"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `json:"permission_level,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *WorkspaceObjectPermissionsDescription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WorkspaceObjectPermissionsDescription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type WorkspaceObjectPermissionsRequest struct { + AccessControlList []WorkspaceObjectAccessControlRequest `json:"access_control_list,omitempty"` + // The workspace object for which to get or manage permissions. + WorkspaceObjectId string `json:"-" url:"-"` + // The workspace object type for which to get or manage permissions. 
+ WorkspaceObjectType string `json:"-" url:"-"` +} From 357593875d571c46c4c1f4d06f551f7187da3d9e Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Feb 2025 12:35:29 +0000 Subject: [PATCH 2/5] remove preview from service names --- apps/v2preview/api.go | 36 +- apps/v2preview/client.go | 14 +- apps/v2preview/impl.go | 40 +- billing/v2preview/api.go | 94 +-- billing/v2preview/client.go | 60 +- billing/v2preview/impl.go | 66 +- catalog/v2preview/api.go | 576 ++++++++-------- catalog/v2preview/client.go | 330 +++++----- catalog/v2preview/impl.go | 402 ++++++------ cleanrooms/v2preview/api.go | 62 +- cleanrooms/v2preview/client.go | 42 +- cleanrooms/v2preview/impl.go | 48 +- compute/v2/impl.go | 1 - compute/v2preview/api.go | 249 +++---- compute/v2preview/client.go | 126 ++-- compute/v2preview/impl.go | 207 +++--- dashboards/v2preview/api.go | 160 ++--- dashboards/v2preview/client.go | 56 +- dashboards/v2preview/impl.go | 108 +-- files/v2preview/api.go | 67 +- files/v2preview/client.go | 28 +- files/v2preview/impl.go | 52 +- iam/v2preview/api.go | 298 ++++----- iam/v2preview/client.go | 172 ++--- iam/v2preview/impl.go | 194 +++--- jobs/v2/model.go | 2 +- jobs/v2preview/api.go | 70 +- jobs/v2preview/client.go | 28 +- jobs/v2preview/impl.go | 66 +- jobs/v2preview/model.go | 2 +- marketplace/v2preview/api.go | 322 ++++----- marketplace/v2preview/client.go | 164 ++--- marketplace/v2preview/impl.go | 212 +++--- ml/v2preview/api.go | 42 +- ml/v2preview/client.go | 28 +- ml/v2preview/impl.go | 184 +++--- oauth2/v2preview/api.go | 118 ++-- oauth2/v2preview/client.go | 70 +- oauth2/v2preview/impl.go | 96 +-- pipelines/v2preview/api.go | 52 +- pipelines/v2preview/client.go | 14 +- pipelines/v2preview/impl.go | 40 +- provisioning/v2preview/api.go | 214 +++--- provisioning/v2preview/client.go | 84 +-- provisioning/v2preview/impl.go | 88 +-- serving/v2preview/api.go | 112 ++-- serving/v2preview/client.go | 48 +- serving/v2preview/impl.go | 77 +-- settings/v2preview/api.go | 403 ++++++------ settings/v2preview/client.go | 342 ++++------ settings/v2preview/impl.go | 278 ++++---- sharing/v2/model.go | 136 +--- sharing/v2preview/api.go | 100 +-- sharing/v2preview/client.go | 56 +- sharing/v2preview/impl.go | 76 +-- sharing/v2preview/model.go | 136 +--- sql/v2preview/api.go | 1047 +++++++++++++++--------------- sql/v2preview/client.go | 196 +++--- sql/v2preview/impl.go | 568 ++++++++-------- vectorsearch/v2preview/api.go | 42 +- vectorsearch/v2preview/client.go | 28 +- vectorsearch/v2preview/impl.go | 44 +- workspace/v2preview/api.go | 139 ++-- workspace/v2preview/client.go | 56 +- workspace/v2preview/impl.go | 110 ++-- 65 files changed, 4475 insertions(+), 4903 deletions(-) diff --git a/apps/v2preview/api.go b/apps/v2preview/api.go index feb34d7e3..a5d19582f 100755 --- a/apps/v2preview/api.go +++ b/apps/v2preview/api.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type AppsPreviewInterface interface { +type AppsInterface interface { // Create an app. 
// @@ -138,9 +138,9 @@ type AppsPreviewInterface interface { UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) } -func NewAppsPreview(client *client.DatabricksClient) *AppsPreviewAPI { - return &AppsPreviewAPI{ - appsPreviewImpl: appsPreviewImpl{ +func NewApps(client *client.DatabricksClient) *AppsAPI { + return &AppsAPI{ + appsImpl: appsImpl{ client: client, }, } @@ -149,15 +149,15 @@ func NewAppsPreview(client *client.DatabricksClient) *AppsPreviewAPI { // Apps run directly on a customer’s Databricks instance, integrate with their // data, use and extend Databricks services, and enable users to interact // through single sign-on. -type AppsPreviewAPI struct { - appsPreviewImpl +type AppsAPI struct { + appsImpl } // Delete an app. // // Deletes an app. -func (a *AppsPreviewAPI) DeleteByName(ctx context.Context, name string) (*App, error) { - return a.appsPreviewImpl.Delete(ctx, DeleteAppRequest{ +func (a *AppsAPI) DeleteByName(ctx context.Context, name string) (*App, error) { + return a.appsImpl.Delete(ctx, DeleteAppRequest{ Name: name, }) } @@ -165,8 +165,8 @@ func (a *AppsPreviewAPI) DeleteByName(ctx context.Context, name string) (*App, e // Get an app. // // Retrieves information for the app with the supplied name. -func (a *AppsPreviewAPI) GetByName(ctx context.Context, name string) (*App, error) { - return a.appsPreviewImpl.Get(ctx, GetAppRequest{ +func (a *AppsAPI) GetByName(ctx context.Context, name string) (*App, error) { + return a.appsImpl.Get(ctx, GetAppRequest{ Name: name, }) } @@ -175,8 +175,8 @@ func (a *AppsPreviewAPI) GetByName(ctx context.Context, name string) (*App, erro // // Retrieves information for the app deployment with the supplied name and // deployment id. -func (a *AppsPreviewAPI) GetDeploymentByAppNameAndDeploymentId(ctx context.Context, appName string, deploymentId string) (*AppDeployment, error) { - return a.appsPreviewImpl.GetDeployment(ctx, GetAppDeploymentRequest{ +func (a *AppsAPI) GetDeploymentByAppNameAndDeploymentId(ctx context.Context, appName string, deploymentId string) (*AppDeployment, error) { + return a.appsImpl.GetDeployment(ctx, GetAppDeploymentRequest{ AppName: appName, DeploymentId: deploymentId, }) @@ -185,8 +185,8 @@ func (a *AppsPreviewAPI) GetDeploymentByAppNameAndDeploymentId(ctx context.Conte // Get app permission levels. // // Gets the permission levels that a user can have on an object. -func (a *AppsPreviewAPI) GetPermissionLevelsByAppName(ctx context.Context, appName string) (*GetAppPermissionLevelsResponse, error) { - return a.appsPreviewImpl.GetPermissionLevels(ctx, GetAppPermissionLevelsRequest{ +func (a *AppsAPI) GetPermissionLevelsByAppName(ctx context.Context, appName string) (*GetAppPermissionLevelsResponse, error) { + return a.appsImpl.GetPermissionLevels(ctx, GetAppPermissionLevelsRequest{ AppName: appName, }) } @@ -195,8 +195,8 @@ func (a *AppsPreviewAPI) GetPermissionLevelsByAppName(ctx context.Context, appNa // // Gets the permissions of an app. Apps can inherit permissions from their root // object. 
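+//
+// A hedged usage sketch (names follow the renamed API in this diff; the app
+// name is a placeholder):
+//
+//	perms, err := a.GetPermissionsByAppName(ctx, "my-app")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = perms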
-func (a *AppsPreviewAPI) GetPermissionsByAppName(ctx context.Context, appName string) (*AppPermissions, error) { - return a.appsPreviewImpl.GetPermissions(ctx, GetAppPermissionsRequest{ +func (a *AppsAPI) GetPermissionsByAppName(ctx context.Context, appName string) (*AppPermissions, error) { + return a.appsImpl.GetPermissions(ctx, GetAppPermissionsRequest{ AppName: appName, }) } @@ -204,8 +204,8 @@ func (a *AppsPreviewAPI) GetPermissionsByAppName(ctx context.Context, appName st // List app deployments. // // Lists all app deployments for the app with the supplied name. -func (a *AppsPreviewAPI) ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) { - return a.appsPreviewImpl.internalListDeployments(ctx, ListAppDeploymentsRequest{ +func (a *AppsAPI) ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) { + return a.appsImpl.internalListDeployments(ctx, ListAppDeploymentsRequest{ AppName: appName, }) } diff --git a/apps/v2preview/client.go b/apps/v2preview/client.go index acb6d473e..037892e7e 100755 --- a/apps/v2preview/client.go +++ b/apps/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type AppsPreviewClient struct { - AppsPreviewInterface +type AppsClient struct { + AppsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAppsPreviewClient(cfg *config.Config) (*AppsPreviewClient, error) { +func NewAppsClient(cfg *config.Config) (*AppsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,9 +37,9 @@ func NewAppsPreviewClient(cfg *config.Config) (*AppsPreviewClient, error) { return nil, err } - return &AppsPreviewClient{ - Config: cfg, - apiClient: apiClient, - AppsPreviewInterface: NewAppsPreview(databricksClient), + return &AppsClient{ + Config: cfg, + apiClient: apiClient, + AppsInterface: NewApps(databricksClient), }, nil } diff --git a/apps/v2preview/impl.go b/apps/v2preview/impl.go index ad1415fda..3cf98d94c 100755 --- a/apps/v2preview/impl.go +++ b/apps/v2preview/impl.go @@ -13,12 +13,12 @@ import ( "golang.org/x/exp/slices" ) -// unexported type that holds implementations of just AppsPreview API methods -type appsPreviewImpl struct { +// unexported type that holds implementations of just Apps API methods +type appsImpl struct { client *client.DatabricksClient } -func (a *appsPreviewImpl) Create(ctx context.Context, request CreateAppRequest) (*App, error) { +func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, error) { var app App path := "/api/2.0preview/apps" queryParams := make(map[string]any) @@ -32,7 +32,7 @@ func (a *appsPreviewImpl) Create(ctx context.Context, request CreateAppRequest) return &app, err } -func (a *appsPreviewImpl) Delete(ctx context.Context, request DeleteAppRequest) (*App, error) { +func (a *appsImpl) Delete(ctx context.Context, request DeleteAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) queryParams := make(map[string]any) @@ -42,7 +42,7 @@ func (a *appsPreviewImpl) Delete(ctx context.Context, request DeleteAppRequest) return &app, err } -func (a *appsPreviewImpl) Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) { +func (a *appsImpl) Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments", request.AppName) queryParams := 
make(map[string]any) @@ -53,7 +53,7 @@ func (a *appsPreviewImpl) Deploy(ctx context.Context, request CreateAppDeploymen return &appDeployment, err } -func (a *appsPreviewImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) { +func (a *appsImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) queryParams := make(map[string]any) @@ -63,7 +63,7 @@ func (a *appsPreviewImpl) Get(ctx context.Context, request GetAppRequest) (*App, return &app, err } -func (a *appsPreviewImpl) GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) { +func (a *appsImpl) GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments/%v", request.AppName, request.DeploymentId) queryParams := make(map[string]any) @@ -73,7 +73,7 @@ func (a *appsPreviewImpl) GetDeployment(ctx context.Context, request GetAppDeplo return &appDeployment, err } -func (a *appsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error) { +func (a *appsImpl) GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error) { var getAppPermissionLevelsResponse GetAppPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v/permissionLevels", request.AppName) queryParams := make(map[string]any) @@ -83,7 +83,7 @@ func (a *appsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetAp return &getAppPermissionLevelsResponse, err } -func (a *appsPreviewImpl) GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error) { +func (a *appsImpl) GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) queryParams := make(map[string]any) @@ -96,7 +96,7 @@ func (a *appsPreviewImpl) GetPermissions(ctx context.Context, request GetAppPerm // List apps. // // Lists all apps in the workspace. -func (a *appsPreviewImpl) List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] { +func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] { getNextPage := func(ctx context.Context, req ListAppsRequest) (*ListAppsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -123,11 +123,11 @@ func (a *appsPreviewImpl) List(ctx context.Context, request ListAppsRequest) lis // List apps. // // Lists all apps in the workspace. -func (a *appsPreviewImpl) ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) { +func (a *appsImpl) ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) { iterator := a.List(ctx, request) return listing.ToSlice[App](ctx, iterator) } -func (a *appsPreviewImpl) internalList(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { +func (a *appsImpl) internalList(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { var listAppsResponse ListAppsResponse path := "/api/2.0preview/apps" queryParams := make(map[string]any) @@ -140,7 +140,7 @@ func (a *appsPreviewImpl) internalList(ctx context.Context, request ListAppsRequ // List app deployments. // // Lists all app deployments for the app with the supplied name. 
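+//
+// A hedged iteration sketch over the returned listing.Iterator (assumes the
+// HasNext/Next iterator contract used elsewhere in this SDK):
+//
+//	it := a.ListDeployments(ctx, ListAppDeploymentsRequest{AppName: "my-app"})
+//	for it.HasNext(ctx) {
+//		deployment, err := it.Next(ctx)
+//		if err != nil {
+//			break // or handle the error
+//		}
+//		_ = deployment
+//	}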
-func (a *appsPreviewImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] { +func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] { getNextPage := func(ctx context.Context, req ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -167,11 +167,11 @@ func (a *appsPreviewImpl) ListDeployments(ctx context.Context, request ListAppDe // List app deployments. // // Lists all app deployments for the app with the supplied name. -func (a *appsPreviewImpl) ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) { +func (a *appsImpl) ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) { iterator := a.ListDeployments(ctx, request) return listing.ToSlice[AppDeployment](ctx, iterator) } -func (a *appsPreviewImpl) internalListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { +func (a *appsImpl) internalListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { var listAppDeploymentsResponse ListAppDeploymentsResponse path := fmt.Sprintf("/api/2.0preview/apps/%v/deployments", request.AppName) queryParams := make(map[string]any) @@ -181,7 +181,7 @@ func (a *appsPreviewImpl) internalListDeployments(ctx context.Context, request L return &listAppDeploymentsResponse, err } -func (a *appsPreviewImpl) SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { +func (a *appsImpl) SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) queryParams := make(map[string]any) @@ -192,7 +192,7 @@ func (a *appsPreviewImpl) SetPermissions(ctx context.Context, request AppPermiss return &appPermissions, err } -func (a *appsPreviewImpl) Start(ctx context.Context, request StartAppRequest) (*App, error) { +func (a *appsImpl) Start(ctx context.Context, request StartAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0preview/apps/%v/start", request.Name) queryParams := make(map[string]any) @@ -203,7 +203,7 @@ func (a *appsPreviewImpl) Start(ctx context.Context, request StartAppRequest) (* return &app, err } -func (a *appsPreviewImpl) Stop(ctx context.Context, request StopAppRequest) (*App, error) { +func (a *appsImpl) Stop(ctx context.Context, request StopAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0preview/apps/%v/stop", request.Name) queryParams := make(map[string]any) @@ -214,7 +214,7 @@ func (a *appsPreviewImpl) Stop(ctx context.Context, request StopAppRequest) (*Ap return &app, err } -func (a *appsPreviewImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, error) { +func (a *appsImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0preview/apps/%v", request.Name) queryParams := make(map[string]any) @@ -225,7 +225,7 @@ func (a *appsPreviewImpl) Update(ctx context.Context, request UpdateAppRequest) return &app, err } -func (a *appsPreviewImpl) UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { +func (a *appsImpl) UpdatePermissions(ctx context.Context, request AppPermissionsRequest) 
(*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0preview/permissions/apps/%v", request.AppName) queryParams := make(map[string]any) diff --git a/billing/v2preview/api.go b/billing/v2preview/api.go index 0b2d503bf..a51c8ebca 100755 --- a/billing/v2preview/api.go +++ b/billing/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Billable Usage Preview, Budget Policy Preview, Budgets Preview, Log Delivery Preview, Usage Dashboards Preview, etc. +// These APIs allow you to manage Billable Usage, Budget Policy, Budgets, Log Delivery, Usage Dashboards, etc. package billingpreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type BillableUsagePreviewInterface interface { +type BillableUsageInterface interface { // Return billable usage logs. // @@ -29,9 +29,9 @@ type BillableUsagePreviewInterface interface { Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) } -func NewBillableUsagePreview(client *client.DatabricksClient) *BillableUsagePreviewAPI { - return &BillableUsagePreviewAPI{ - billableUsagePreviewImpl: billableUsagePreviewImpl{ +func NewBillableUsage(client *client.DatabricksClient) *BillableUsageAPI { + return &BillableUsageAPI{ + billableUsageImpl: billableUsageImpl{ client: client, }, } @@ -39,11 +39,11 @@ func NewBillableUsagePreview(client *client.DatabricksClient) *BillableUsagePrev // This API allows you to download billable usage logs for the specified account // and date range. This feature works with all account types. -type BillableUsagePreviewAPI struct { - billableUsagePreviewImpl +type BillableUsageAPI struct { + billableUsageImpl } -type BudgetPolicyPreviewInterface interface { +type BudgetPolicyInterface interface { // Create a budget policy. // @@ -92,24 +92,24 @@ type BudgetPolicyPreviewInterface interface { Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) } -func NewBudgetPolicyPreview(client *client.DatabricksClient) *BudgetPolicyPreviewAPI { - return &BudgetPolicyPreviewAPI{ - budgetPolicyPreviewImpl: budgetPolicyPreviewImpl{ +func NewBudgetPolicy(client *client.DatabricksClient) *BudgetPolicyAPI { + return &BudgetPolicyAPI{ + budgetPolicyImpl: budgetPolicyImpl{ client: client, }, } } // A service that serves the REST API for budget policies -type BudgetPolicyPreviewAPI struct { - budgetPolicyPreviewImpl +type BudgetPolicyAPI struct { + budgetPolicyImpl } // Delete a budget policy. // // Deletes a policy -func (a *BudgetPolicyPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { - return a.budgetPolicyPreviewImpl.Delete(ctx, DeleteBudgetPolicyRequest{ +func (a *BudgetPolicyAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.budgetPolicyImpl.Delete(ctx, DeleteBudgetPolicyRequest{ PolicyId: policyId, }) } @@ -117,13 +117,13 @@ func (a *BudgetPolicyPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId // Get a budget policy. // // Retrieves a policy by its ID.
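A sketch of the renamed billable-usage entry point, under the assumption that DownloadRequest carries StartMonth/EndMonth fields and that DownloadResponse wraps the CSV stream in a Contents io.ReadCloser, as in the non-preview billing package; the month range is a placeholder.

package main

import (
	"context"
	"io"
	"log"
	"os"

	billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
)

func main() {
	ctx := context.Background()
	usage, err := billingpreview.NewBillableUsageClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Download returns billable usage logs for the configured account.
	resp, err := usage.Download(ctx, billingpreview.DownloadRequest{
		StartMonth: "2025-01", // placeholder month range
		EndMonth:   "2025-02",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Contents.Close()
	// Stream the CSV to stdout.
	if _, err := io.Copy(os.Stdout, resp.Contents); err != nil {
		log.Fatal(err)
	}
}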
-func (a *BudgetPolicyPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) { - return a.budgetPolicyPreviewImpl.Get(ctx, GetBudgetPolicyRequest{ +func (a *BudgetPolicyAPI) GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) { + return a.budgetPolicyImpl.Get(ctx, GetBudgetPolicyRequest{ PolicyId: policyId, }) } -type BudgetsPreviewInterface interface { +type BudgetsInterface interface { // Create new budget. // @@ -176,9 +176,9 @@ type BudgetsPreviewInterface interface { Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) } -func NewBudgetsPreview(client *client.DatabricksClient) *BudgetsPreviewAPI { - return &BudgetsPreviewAPI{ - budgetsPreviewImpl: budgetsPreviewImpl{ +func NewBudgets(client *client.DatabricksClient) *BudgetsAPI { + return &BudgetsAPI{ + budgetsImpl: budgetsImpl{ client: client, }, } @@ -188,16 +188,16 @@ func NewBudgetsPreview(client *client.DatabricksClient) *BudgetsPreviewAPI { // to monitor usage across your account. You can set up budgets to either track // account-wide spending, or apply filters to track the spending of specific // teams, projects, or workspaces. -type BudgetsPreviewAPI struct { - budgetsPreviewImpl +type BudgetsAPI struct { + budgetsImpl } // Delete budget. // // Deletes a budget configuration for an account. Both account and budget // configuration are specified by ID. This cannot be undone. -func (a *BudgetsPreviewAPI) DeleteByBudgetId(ctx context.Context, budgetId string) error { - return a.budgetsPreviewImpl.Delete(ctx, DeleteBudgetConfigurationRequest{ +func (a *BudgetsAPI) DeleteByBudgetId(ctx context.Context, budgetId string) error { + return a.budgetsImpl.Delete(ctx, DeleteBudgetConfigurationRequest{ BudgetId: budgetId, }) } @@ -206,13 +206,13 @@ func (a *BudgetsPreviewAPI) DeleteByBudgetId(ctx context.Context, budgetId strin // // Gets a budget configuration for an account. Both account and budget // configuration are specified by ID. -func (a *BudgetsPreviewAPI) GetByBudgetId(ctx context.Context, budgetId string) (*GetBudgetConfigurationResponse, error) { - return a.budgetsPreviewImpl.Get(ctx, GetBudgetConfigurationRequest{ +func (a *BudgetsAPI) GetByBudgetId(ctx context.Context, budgetId string) (*GetBudgetConfigurationResponse, error) { + return a.budgetsImpl.Get(ctx, GetBudgetConfigurationRequest{ BudgetId: budgetId, }) } -type LogDeliveryPreviewInterface interface { +type LogDeliveryInterface interface { // Create a new log delivery configuration. // @@ -271,7 +271,7 @@ type LogDeliveryPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) - // LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryPreviewAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. + // LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. // // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. // @@ -280,7 +280,7 @@ type LogDeliveryPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. 
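The ByBudgetId convenience wrappers above take the raw ID directly; a minimal sketch, with a placeholder budget ID.

package main

import (
	"context"
	"log"

	billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
)

func main() {
	ctx := context.Background()
	budgets, err := billingpreview.NewBudgetsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Shorthand for Get(ctx, GetBudgetConfigurationRequest{BudgetId: ...}).
	got, err := budgets.GetByBudgetId(ctx, "0123-456789-abcdef") // placeholder ID
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("budget configuration: %+v", got)
}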
LogDeliveryConfigurationConfigNameToConfigIdMap(ctx context.Context, request ListLogDeliveryRequest) (map[string]string, error) - // GetByConfigName calls [LogDeliveryPreviewAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. + // GetByConfigName calls [LogDeliveryAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. // // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. // @@ -299,9 +299,9 @@ type LogDeliveryPreviewInterface interface { PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error } -func NewLogDeliveryPreview(client *client.DatabricksClient) *LogDeliveryPreviewAPI { - return &LogDeliveryPreviewAPI{ - logDeliveryPreviewImpl: logDeliveryPreviewImpl{ +func NewLogDelivery(client *client.DatabricksClient) *LogDeliveryAPI { + return &LogDeliveryAPI{ + logDeliveryImpl: logDeliveryImpl{ client: client, }, } @@ -368,28 +368,28 @@ func NewLogDeliveryPreview(client *client.DatabricksClient) *LogDeliveryPreviewA // [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html // [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html // [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html -type LogDeliveryPreviewAPI struct { - logDeliveryPreviewImpl +type LogDeliveryAPI struct { + logDeliveryImpl } // Get log delivery configuration. // // Gets a Databricks log delivery configuration object for an account, both // specified by ID. -func (a *LogDeliveryPreviewAPI) GetByLogDeliveryConfigurationId(ctx context.Context, logDeliveryConfigurationId string) (*WrappedLogDeliveryConfiguration, error) { - return a.logDeliveryPreviewImpl.Get(ctx, GetLogDeliveryRequest{ +func (a *LogDeliveryAPI) GetByLogDeliveryConfigurationId(ctx context.Context, logDeliveryConfigurationId string) (*WrappedLogDeliveryConfiguration, error) { + return a.logDeliveryImpl.Get(ctx, GetLogDeliveryRequest{ LogDeliveryConfigurationId: logDeliveryConfigurationId, }) } -// LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryPreviewAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. +// LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. // // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. // // Note: All [LogDeliveryConfiguration] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
-func (a *LogDeliveryPreviewAPI) LogDeliveryConfigurationConfigNameToConfigIdMap(ctx context.Context, request ListLogDeliveryRequest) (map[string]string, error) { +func (a *LogDeliveryAPI) LogDeliveryConfigurationConfigNameToConfigIdMap(ctx context.Context, request ListLogDeliveryRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -407,14 +407,14 @@ func (a *LogDeliveryPreviewAPI) LogDeliveryConfigurationConfigNameToConfigIdMap( return mapping, nil } -// GetByConfigName calls [LogDeliveryPreviewAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. +// GetByConfigName calls [LogDeliveryAPI.LogDeliveryConfigurationConfigNameToConfigIdMap] and returns a single [LogDeliveryConfiguration]. // // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. // // Note: All [LogDeliveryConfiguration] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *LogDeliveryPreviewAPI) GetByConfigName(ctx context.Context, name string) (*LogDeliveryConfiguration, error) { +func (a *LogDeliveryAPI) GetByConfigName(ctx context.Context, name string) (*LogDeliveryConfiguration, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListLogDeliveryRequest{}) if err != nil { @@ -435,7 +435,7 @@ func (a *LogDeliveryPreviewAPI) GetByConfigName(ctx context.Context, name string return &alternatives[0], nil } -type UsageDashboardsPreviewInterface interface { +type UsageDashboardsInterface interface { // Create new usage dashboard. // @@ -450,9 +450,9 @@ type UsageDashboardsPreviewInterface interface { Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) } -func NewUsageDashboardsPreview(client *client.DatabricksClient) *UsageDashboardsPreviewAPI { - return &UsageDashboardsPreviewAPI{ - usageDashboardsPreviewImpl: usageDashboardsPreviewImpl{ +func NewUsageDashboards(client *client.DatabricksClient) *UsageDashboardsAPI { + return &UsageDashboardsAPI{ + usageDashboardsImpl: usageDashboardsImpl{ client: client, }, } @@ -461,6 +461,6 @@ func NewUsageDashboardsPreview(client *client.DatabricksClient) *UsageDashboards // These APIs manage usage dashboards for this account. Usage dashboards enable // you to gain insights into your usage with pre-built dashboards: visualize // breakdowns, analyze tag attributions, and identify cost drivers. 
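GetByConfigName above is a linear scan over ListAll; a sketch of calling it, with a placeholder configuration name.

package main

import (
	"context"
	"log"

	billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
)

func main() {
	ctx := context.Background()
	ld, err := billingpreview.NewLogDeliveryClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Loads every LogDeliveryConfiguration into memory, then matches by
	// ConfigName; errors on zero or multiple matches.
	cfg, err := ld.GetByConfigName(ctx, "audit-logs") // placeholder name
	if err != nil {
		log.Fatal(err)
	}
	log.Println(cfg.ConfigId)
}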
-type UsageDashboardsPreviewAPI struct { - usageDashboardsPreviewImpl +type UsageDashboardsAPI struct { + usageDashboardsImpl } diff --git a/billing/v2preview/client.go b/billing/v2preview/client.go index dcbf0833f..8c9d68634 100755 --- a/billing/v2preview/client.go +++ b/billing/v2preview/client.go @@ -9,13 +9,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/config" ) -type BillableUsagePreviewClient struct { - BillableUsagePreviewInterface +type BillableUsageClient struct { + BillableUsageInterface Config *config.Config } -func NewBillableUsagePreviewClient(cfg *config.Config) (*BillableUsagePreviewClient, error) { +func NewBillableUsageClient(cfg *config.Config) (*BillableUsageClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -33,19 +33,19 @@ func NewBillableUsagePreviewClient(cfg *config.Config) (*BillableUsagePreviewCli return nil, err } - return &BillableUsagePreviewClient{ - Config: cfg, - BillableUsagePreviewInterface: NewBillableUsagePreview(apiClient), + return &BillableUsageClient{ + Config: cfg, + BillableUsageInterface: NewBillableUsage(apiClient), }, nil } -type BudgetPolicyPreviewClient struct { - BudgetPolicyPreviewInterface +type BudgetPolicyClient struct { + BudgetPolicyInterface Config *config.Config } -func NewBudgetPolicyPreviewClient(cfg *config.Config) (*BudgetPolicyPreviewClient, error) { +func NewBudgetPolicyClient(cfg *config.Config) (*BudgetPolicyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -63,19 +63,19 @@ func NewBudgetPolicyPreviewClient(cfg *config.Config) (*BudgetPolicyPreviewClien return nil, err } - return &BudgetPolicyPreviewClient{ - Config: cfg, - BudgetPolicyPreviewInterface: NewBudgetPolicyPreview(apiClient), + return &BudgetPolicyClient{ + Config: cfg, + BudgetPolicyInterface: NewBudgetPolicy(apiClient), }, nil } -type BudgetsPreviewClient struct { - BudgetsPreviewInterface +type BudgetsClient struct { + BudgetsInterface Config *config.Config } -func NewBudgetsPreviewClient(cfg *config.Config) (*BudgetsPreviewClient, error) { +func NewBudgetsClient(cfg *config.Config) (*BudgetsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -93,19 +93,19 @@ func NewBudgetsPreviewClient(cfg *config.Config) (*BudgetsPreviewClient, error) return nil, err } - return &BudgetsPreviewClient{ - Config: cfg, - BudgetsPreviewInterface: NewBudgetsPreview(apiClient), + return &BudgetsClient{ + Config: cfg, + BudgetsInterface: NewBudgets(apiClient), }, nil } -type LogDeliveryPreviewClient struct { - LogDeliveryPreviewInterface +type LogDeliveryClient struct { + LogDeliveryInterface Config *config.Config } -func NewLogDeliveryPreviewClient(cfg *config.Config) (*LogDeliveryPreviewClient, error) { +func NewLogDeliveryClient(cfg *config.Config) (*LogDeliveryClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -123,19 +123,19 @@ func NewLogDeliveryPreviewClient(cfg *config.Config) (*LogDeliveryPreviewClient, return nil, err } - return &LogDeliveryPreviewClient{ - Config: cfg, - LogDeliveryPreviewInterface: NewLogDeliveryPreview(apiClient), + return &LogDeliveryClient{ + Config: cfg, + LogDeliveryInterface: NewLogDelivery(apiClient), }, nil } -type UsageDashboardsPreviewClient struct { - UsageDashboardsPreviewInterface +type UsageDashboardsClient struct { + UsageDashboardsInterface Config *config.Config } -func NewUsageDashboardsPreviewClient(cfg *config.Config) (*UsageDashboardsPreviewClient, error) { +func NewUsageDashboardsClient(cfg *config.Config) (*UsageDashboardsClient, error) { if cfg == nil { cfg = 
&config.Config{} } @@ -153,8 +153,8 @@ func NewUsageDashboardsPreviewClient(cfg *config.Config) (*UsageDashboardsPrevie return nil, err } - return &UsageDashboardsPreviewClient{ - Config: cfg, - UsageDashboardsPreviewInterface: NewUsageDashboardsPreview(apiClient), + return &UsageDashboardsClient{ + Config: cfg, + UsageDashboardsInterface: NewUsageDashboards(apiClient), }, nil } diff --git a/billing/v2preview/impl.go b/billing/v2preview/impl.go index e03271b44..1019acf6b 100755 --- a/billing/v2preview/impl.go +++ b/billing/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just BillableUsagePreview API methods -type billableUsagePreviewImpl struct { +// unexported type that holds implementations of just BillableUsage API methods +type billableUsageImpl struct { client *client.DatabricksClient } -func (a *billableUsagePreviewImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { +func (a *billableUsageImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { var downloadResponse DownloadResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/usage/download", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -27,12 +27,12 @@ func (a *billableUsagePreviewImpl) Download(ctx context.Context, request Downloa return &downloadResponse, err } -// unexported type that holds implementations of just BudgetPolicyPreview API methods -type budgetPolicyPreviewImpl struct { +// unexported type that holds implementations of just BudgetPolicy API methods +type budgetPolicyImpl struct { client *client.DatabricksClient } -func (a *budgetPolicyPreviewImpl) Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) { +func (a *budgetPolicyImpl) Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) { var budgetPolicy BudgetPolicy path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -43,7 +43,7 @@ func (a *budgetPolicyPreviewImpl) Create(ctx context.Context, request CreateBudg return &budgetPolicy, err } -func (a *budgetPolicyPreviewImpl) Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error { +func (a *budgetPolicyImpl) Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -53,7 +53,7 @@ func (a *budgetPolicyPreviewImpl) Delete(ctx context.Context, request DeleteBudg return err } -func (a *budgetPolicyPreviewImpl) Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) { +func (a *budgetPolicyImpl) Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) { var budgetPolicy BudgetPolicy path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -67,7 +67,7 @@ func (a *budgetPolicyPreviewImpl) Get(ctx context.Context, request GetBudgetPoli // // Lists all policies. Policies are returned in the alphabetically ascending // order of their names. 
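A sketch of draining the policy listing described above through ListAll, which materializes the iterator into a slice; it assumes BudgetPolicy exposes a PolicyName field as in the non-preview billing package.

package main

import (
	"context"
	"log"

	billingpreview "github.com/databricks/databricks-sdk-go/billing/v2preview"
)

func main() {
	ctx := context.Background()
	policies, err := billingpreview.NewBudgetPolicyClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Pages through /api/2.1preview/accounts/{account_id}/budget-policies.
	all, err := policies.ListAll(ctx, billingpreview.ListBudgetPoliciesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range all {
		log.Println(p.PolicyName)
	}
}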
-func (a *budgetPolicyPreviewImpl) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { +func (a *budgetPolicyImpl) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { getNextPage := func(ctx context.Context, req ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -95,11 +95,11 @@ func (a *budgetPolicyPreviewImpl) List(ctx context.Context, request ListBudgetPo // // Lists all policies. Policies are returned in the alphabetically ascending // order of their names. -func (a *budgetPolicyPreviewImpl) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { +func (a *budgetPolicyImpl) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { iterator := a.List(ctx, request) return listing.ToSlice[BudgetPolicy](ctx, iterator) } -func (a *budgetPolicyPreviewImpl) internalList(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { +func (a *budgetPolicyImpl) internalList(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { var listBudgetPoliciesResponse ListBudgetPoliciesResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -109,7 +109,7 @@ func (a *budgetPolicyPreviewImpl) internalList(ctx context.Context, request List return &listBudgetPoliciesResponse, err } -func (a *budgetPolicyPreviewImpl) Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) { +func (a *budgetPolicyImpl) Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) { var budgetPolicy BudgetPolicy path := fmt.Sprintf("/api/2.1preview/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -123,12 +123,12 @@ func (a *budgetPolicyPreviewImpl) Update(ctx context.Context, request UpdateBudg return &budgetPolicy, err } -// unexported type that holds implementations of just BudgetsPreview API methods -type budgetsPreviewImpl struct { +// unexported type that holds implementations of just Budgets API methods +type budgetsImpl struct { client *client.DatabricksClient } -func (a *budgetsPreviewImpl) Create(ctx context.Context, request CreateBudgetConfigurationRequest) (*CreateBudgetConfigurationResponse, error) { +func (a *budgetsImpl) Create(ctx context.Context, request CreateBudgetConfigurationRequest) (*CreateBudgetConfigurationResponse, error) { var createBudgetConfigurationResponse CreateBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -139,7 +139,7 @@ func (a *budgetsPreviewImpl) Create(ctx context.Context, request CreateBudgetCon return &createBudgetConfigurationResponse, err } -func (a *budgetsPreviewImpl) Delete(ctx context.Context, request DeleteBudgetConfigurationRequest) error { +func (a *budgetsImpl) Delete(ctx context.Context, request DeleteBudgetConfigurationRequest) error { var deleteBudgetConfigurationResponse DeleteBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) queryParams := make(map[string]any) @@ -149,7 +149,7 @@ func (a *budgetsPreviewImpl) Delete(ctx context.Context, request DeleteBudgetCon return 
err } -func (a *budgetsPreviewImpl) Get(ctx context.Context, request GetBudgetConfigurationRequest) (*GetBudgetConfigurationResponse, error) { +func (a *budgetsImpl) Get(ctx context.Context, request GetBudgetConfigurationRequest) (*GetBudgetConfigurationResponse, error) { var getBudgetConfigurationResponse GetBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) queryParams := make(map[string]any) @@ -162,7 +162,7 @@ func (a *budgetsPreviewImpl) Get(ctx context.Context, request GetBudgetConfigura // Get all budgets. // // Gets all budgets associated with this account. -func (a *budgetsPreviewImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] { +func (a *budgetsImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] { getNextPage := func(ctx context.Context, req ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -189,11 +189,11 @@ func (a *budgetsPreviewImpl) List(ctx context.Context, request ListBudgetConfigu // Get all budgets. // // Gets all budgets associated with this account. -func (a *budgetsPreviewImpl) ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) { +func (a *budgetsImpl) ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) { iterator := a.List(ctx, request) return listing.ToSlice[BudgetConfiguration](ctx, iterator) } -func (a *budgetsPreviewImpl) internalList(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { +func (a *budgetsImpl) internalList(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { var listBudgetConfigurationsResponse ListBudgetConfigurationsResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -203,7 +203,7 @@ func (a *budgetsPreviewImpl) internalList(ctx context.Context, request ListBudge return &listBudgetConfigurationsResponse, err } -func (a *budgetsPreviewImpl) Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) { +func (a *budgetsImpl) Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) { var updateBudgetConfigurationResponse UpdateBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1preview/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) queryParams := make(map[string]any) @@ -214,12 +214,12 @@ func (a *budgetsPreviewImpl) Update(ctx context.Context, request UpdateBudgetCon return &updateBudgetConfigurationResponse, err } -// unexported type that holds implementations of just LogDeliveryPreview API methods -type logDeliveryPreviewImpl struct { +// unexported type that holds implementations of just LogDelivery API methods +type logDeliveryImpl struct { client *client.DatabricksClient } -func (a *logDeliveryPreviewImpl) Create(ctx context.Context, request WrappedCreateLogDeliveryConfiguration) (*WrappedLogDeliveryConfiguration, error) { +func (a *logDeliveryImpl) Create(ctx context.Context, request WrappedCreateLogDeliveryConfiguration) (*WrappedLogDeliveryConfiguration, error) { var wrappedLogDeliveryConfiguration 
WrappedLogDeliveryConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -230,7 +230,7 @@ func (a *logDeliveryPreviewImpl) Create(ctx context.Context, request WrappedCrea return &wrappedLogDeliveryConfiguration, err } -func (a *logDeliveryPreviewImpl) Get(ctx context.Context, request GetLogDeliveryRequest) (*WrappedLogDeliveryConfiguration, error) { +func (a *logDeliveryImpl) Get(ctx context.Context, request GetLogDeliveryRequest) (*WrappedLogDeliveryConfiguration, error) { var wrappedLogDeliveryConfiguration WrappedLogDeliveryConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) queryParams := make(map[string]any) @@ -244,7 +244,7 @@ func (a *logDeliveryPreviewImpl) Get(ctx context.Context, request GetLogDelivery // // Gets all Databricks log delivery configurations associated with an account // specified by ID. -func (a *logDeliveryPreviewImpl) List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] { +func (a *logDeliveryImpl) List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] { getNextPage := func(ctx context.Context, req ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -266,11 +266,11 @@ func (a *logDeliveryPreviewImpl) List(ctx context.Context, request ListLogDelive // // Gets all Databricks log delivery configurations associated with an account // specified by ID. -func (a *logDeliveryPreviewImpl) ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) { +func (a *logDeliveryImpl) ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) { iterator := a.List(ctx, request) return listing.ToSlice[LogDeliveryConfiguration](ctx, iterator) } -func (a *logDeliveryPreviewImpl) internalList(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { +func (a *logDeliveryImpl) internalList(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { var wrappedLogDeliveryConfigurations WrappedLogDeliveryConfigurations path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -280,7 +280,7 @@ func (a *logDeliveryPreviewImpl) internalList(ctx context.Context, request ListL return &wrappedLogDeliveryConfigurations, err } -func (a *logDeliveryPreviewImpl) PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error { +func (a *logDeliveryImpl) PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error { var patchStatusResponse PatchStatusResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) queryParams := make(map[string]any) @@ -291,12 +291,12 @@ func (a *logDeliveryPreviewImpl) PatchStatus(ctx context.Context, request Update return err } -// unexported type that holds implementations of just UsageDashboardsPreview API methods -type usageDashboardsPreviewImpl struct { +// unexported type that holds implementations of just UsageDashboards API methods +type usageDashboardsImpl struct { client *client.DatabricksClient } -func (a *usageDashboardsPreviewImpl) 
Create(ctx context.Context, request CreateBillingUsageDashboardRequest) (*CreateBillingUsageDashboardResponse, error) { +func (a *usageDashboardsImpl) Create(ctx context.Context, request CreateBillingUsageDashboardRequest) (*CreateBillingUsageDashboardResponse, error) { var createBillingUsageDashboardResponse CreateBillingUsageDashboardResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/dashboard", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -307,7 +307,7 @@ func (a *usageDashboardsPreviewImpl) Create(ctx context.Context, request CreateB return &createBillingUsageDashboardResponse, err } -func (a *usageDashboardsPreviewImpl) Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) { +func (a *usageDashboardsImpl) Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) { var getBillingUsageDashboardResponse GetBillingUsageDashboardResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/dashboard", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) diff --git a/catalog/v2preview/api.go b/catalog/v2preview/api.go index e5a848ef1..7d776f626 100755 --- a/catalog/v2preview/api.go +++ b/catalog/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Account Metastore Assignments Preview, Account Metastores Preview, Account Storage Credentials Preview, Artifact Allowlists Preview, Catalogs Preview, Connections Preview, Credentials Preview, External Locations Preview, Functions Preview, Grants Preview, Metastores Preview, Model Versions Preview, Online Tables Preview, Quality Monitors Preview, Registered Models Preview, Resource Quotas Preview, Schemas Preview, Storage Credentials Preview, System Schemas Preview, Table Constraints Preview, Tables Preview, Temporary Table Credentials Preview, Volumes Preview, Workspace Bindings Preview, etc. +// These APIs allow you to manage Account Metastore Assignments, Account Metastores, Account Storage Credentials, Artifact Allowlists, Catalogs, Connections, Credentials, External Locations, Functions, Grants, Metastores, Model Versions, Online Tables, Quality Monitors, Registered Models, Resource Quotas, Schemas, Storage Credentials, System Schemas, Table Constraints, Tables, Temporary Table Credentials, Volumes, Workspace Bindings, etc. package catalogpreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type AccountMetastoreAssignmentsPreviewInterface interface { +type AccountMetastoreAssignmentsInterface interface { // Assigns a workspace to a metastore. // @@ -76,25 +76,25 @@ type AccountMetastoreAssignmentsPreviewInterface interface { Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error } -func NewAccountMetastoreAssignmentsPreview(client *client.DatabricksClient) *AccountMetastoreAssignmentsPreviewAPI { - return &AccountMetastoreAssignmentsPreviewAPI{ - accountMetastoreAssignmentsPreviewImpl: accountMetastoreAssignmentsPreviewImpl{ +func NewAccountMetastoreAssignments(client *client.DatabricksClient) *AccountMetastoreAssignmentsAPI { + return &AccountMetastoreAssignmentsAPI{ + accountMetastoreAssignmentsImpl: accountMetastoreAssignmentsImpl{ client: client, }, } } // These APIs manage metastore assignments to a workspace. 
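A sketch of the workspace-to-metastore lookup defined just below. NewAccountMetastoreAssignmentsClient is assumed to follow the same generated client.go pattern as the billing clients in this patch; the workspace ID is a placeholder.

package main

import (
	"context"
	"log"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

func main() {
	ctx := context.Background()
	assignments, err := catalogpreview.NewAccountMetastoreAssignmentsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Returns a 404 when the workspace has no metastore assigned.
	got, err := assignments.GetByWorkspaceId(ctx, 1234567890123456) // placeholder workspace ID
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("assignment: %+v", got)
}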
-type AccountMetastoreAssignmentsPreviewAPI struct { - accountMetastoreAssignmentsPreviewImpl +type AccountMetastoreAssignmentsAPI struct { + accountMetastoreAssignmentsImpl } // Delete a metastore assignment. // // Deletes a metastore assignment to a workspace, leaving the workspace with no // metastore. -func (a *AccountMetastoreAssignmentsPreviewAPI) DeleteByWorkspaceIdAndMetastoreId(ctx context.Context, workspaceId int64, metastoreId string) error { - return a.accountMetastoreAssignmentsPreviewImpl.Delete(ctx, DeleteAccountMetastoreAssignmentRequest{ +func (a *AccountMetastoreAssignmentsAPI) DeleteByWorkspaceIdAndMetastoreId(ctx context.Context, workspaceId int64, metastoreId string) error { + return a.accountMetastoreAssignmentsImpl.Delete(ctx, DeleteAccountMetastoreAssignmentRequest{ WorkspaceId: workspaceId, MetastoreId: metastoreId, }) @@ -106,8 +106,8 @@ func (a *AccountMetastoreAssignmentsPreviewAPI) DeleteByWorkspaceIdAndMetastoreI // the workspace is assigned a metastore, the mapping will be returned. If no // metastore is assigned to the workspace, the assignment will not be found and // a 404 returned. -func (a *AccountMetastoreAssignmentsPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*AccountsMetastoreAssignment, error) { - return a.accountMetastoreAssignmentsPreviewImpl.Get(ctx, GetAccountMetastoreAssignmentRequest{ +func (a *AccountMetastoreAssignmentsAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*AccountsMetastoreAssignment, error) { + return a.accountMetastoreAssignmentsImpl.Get(ctx, GetAccountMetastoreAssignmentRequest{ WorkspaceId: workspaceId, }) } @@ -116,13 +116,13 @@ func (a *AccountMetastoreAssignmentsPreviewAPI) GetByWorkspaceId(ctx context.Con // // Gets a list of all Databricks workspace IDs that have been assigned to a given // metastore. -func (a *AccountMetastoreAssignmentsPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountMetastoreAssignmentsResponse, error) { - return a.accountMetastoreAssignmentsPreviewImpl.internalList(ctx, ListAccountMetastoreAssignmentsRequest{ +func (a *AccountMetastoreAssignmentsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountMetastoreAssignmentsResponse, error) { + return a.accountMetastoreAssignmentsImpl.internalList(ctx, ListAccountMetastoreAssignmentsRequest{ MetastoreId: metastoreId, }) } -type AccountMetastoresPreviewInterface interface { +type AccountMetastoresInterface interface { // Create metastore. // @@ -169,9 +169,9 @@ type AccountMetastoresPreviewInterface interface { Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error) } -func NewAccountMetastoresPreview(client *client.DatabricksClient) *AccountMetastoresPreviewAPI { - return &AccountMetastoresPreviewAPI{ - accountMetastoresPreviewImpl: accountMetastoresPreviewImpl{ +func NewAccountMetastores(client *client.DatabricksClient) *AccountMetastoresAPI { + return &AccountMetastoresAPI{ + accountMetastoresImpl: accountMetastoresImpl{ client: client, }, } @@ -179,15 +179,15 @@ func NewAccountMetastoresPreview(client *client.DatabricksClient) *AccountMetast // These APIs manage Unity Catalog metastores for an account. A metastore // contains catalogs that can be associated with workspaces -type AccountMetastoresPreviewAPI struct { - accountMetastoresPreviewImpl +type AccountMetastoresAPI struct { + accountMetastoresImpl } // Delete a metastore. // // Deletes a Unity Catalog metastore for an account, both specified by ID.
-func (a *AccountMetastoresPreviewAPI) DeleteByMetastoreId(ctx context.Context, metastoreId string) error { - return a.accountMetastoresPreviewImpl.Delete(ctx, DeleteAccountMetastoreRequest{ +func (a *AccountMetastoresAPI) DeleteByMetastoreId(ctx context.Context, metastoreId string) error { + return a.accountMetastoresImpl.Delete(ctx, DeleteAccountMetastoreRequest{ MetastoreId: metastoreId, }) } @@ -195,13 +195,13 @@ func (a *AccountMetastoresPreviewAPI) DeleteByMetastoreId(ctx context.Context, m // Get a metastore. // // Gets a Unity Catalog metastore from an account, both specified by ID. -func (a *AccountMetastoresPreviewAPI) GetByMetastoreId(ctx context.Context, metastoreId string) (*AccountsMetastoreInfo, error) { - return a.accountMetastoresPreviewImpl.Get(ctx, GetAccountMetastoreRequest{ +func (a *AccountMetastoresAPI) GetByMetastoreId(ctx context.Context, metastoreId string) (*AccountsMetastoreInfo, error) { + return a.accountMetastoresImpl.Get(ctx, GetAccountMetastoreRequest{ MetastoreId: metastoreId, }) } -type AccountStorageCredentialsPreviewInterface interface { +type AccountStorageCredentialsInterface interface { // Create a storage credential. // @@ -271,25 +271,25 @@ type AccountStorageCredentialsPreviewInterface interface { Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error) } -func NewAccountStorageCredentialsPreview(client *client.DatabricksClient) *AccountStorageCredentialsPreviewAPI { - return &AccountStorageCredentialsPreviewAPI{ - accountStorageCredentialsPreviewImpl: accountStorageCredentialsPreviewImpl{ +func NewAccountStorageCredentials(client *client.DatabricksClient) *AccountStorageCredentialsAPI { + return &AccountStorageCredentialsAPI{ + accountStorageCredentialsImpl: accountStorageCredentialsImpl{ client: client, }, } } // These APIs manage storage credentials for a particular metastore. -type AccountStorageCredentialsPreviewAPI struct { - accountStorageCredentialsPreviewImpl +type AccountStorageCredentialsAPI struct { + accountStorageCredentialsImpl } // Delete a storage credential. // // Deletes a storage credential from the metastore. The caller must be an owner // of the storage credential. -func (a *AccountStorageCredentialsPreviewAPI) DeleteByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) error { - return a.accountStorageCredentialsPreviewImpl.Delete(ctx, DeleteAccountStorageCredentialRequest{ +func (a *AccountStorageCredentialsAPI) DeleteByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) error { + return a.accountStorageCredentialsImpl.Delete(ctx, DeleteAccountStorageCredentialRequest{ MetastoreId: metastoreId, StorageCredentialName: storageCredentialName, }) @@ -300,8 +300,8 @@ func (a *AccountStorageCredentialsPreviewAPI) DeleteByMetastoreIdAndStorageCrede // Gets a storage credential from the metastore. The caller must be a metastore // admin, the owner of the storage credential, or have a level of privilege on // the storage credential. 
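A sketch of listing an account metastore's storage credentials via the ByMetastoreId wrapper below; the client constructor is assumed from the generated pattern and the metastore ID is a placeholder.

package main

import (
	"context"
	"log"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

func main() {
	ctx := context.Background()
	creds, err := catalogpreview.NewAccountStorageCredentialsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := creds.ListByMetastoreId(ctx, "11111111-2222-3333-4444-555555555555") // placeholder metastore ID
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("storage credentials: %+v", resp)
}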
-func (a *AccountStorageCredentialsPreviewAPI) GetByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) (*AccountsStorageCredentialInfo, error) { - return a.accountStorageCredentialsPreviewImpl.Get(ctx, GetAccountStorageCredentialRequest{ +func (a *AccountStorageCredentialsAPI) GetByMetastoreIdAndStorageCredentialName(ctx context.Context, metastoreId string, storageCredentialName string) (*AccountsStorageCredentialInfo, error) { + return a.accountStorageCredentialsImpl.Get(ctx, GetAccountStorageCredentialRequest{ MetastoreId: metastoreId, StorageCredentialName: storageCredentialName, }) @@ -311,13 +311,13 @@ func (a *AccountStorageCredentialsPreviewAPI) GetByMetastoreIdAndStorageCredenti // // Gets a list of all storage credentials that have been assigned to given // metastore. -func (a *AccountStorageCredentialsPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountStorageCredentialsResponse, error) { - return a.accountStorageCredentialsPreviewImpl.internalList(ctx, ListAccountStorageCredentialsRequest{ +func (a *AccountStorageCredentialsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountStorageCredentialsResponse, error) { + return a.accountStorageCredentialsImpl.internalList(ctx, ListAccountStorageCredentialsRequest{ MetastoreId: metastoreId, }) } -type ArtifactAllowlistsPreviewInterface interface { +type ArtifactAllowlistsInterface interface { // Get an artifact allowlist. // @@ -339,9 +339,9 @@ type ArtifactAllowlistsPreviewInterface interface { Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error) } -func NewArtifactAllowlistsPreview(client *client.DatabricksClient) *ArtifactAllowlistsPreviewAPI { - return &ArtifactAllowlistsPreviewAPI{ - artifactAllowlistsPreviewImpl: artifactAllowlistsPreviewImpl{ +func NewArtifactAllowlists(client *client.DatabricksClient) *ArtifactAllowlistsAPI { + return &ArtifactAllowlistsAPI{ + artifactAllowlistsImpl: artifactAllowlistsImpl{ client: client, }, } @@ -350,21 +350,21 @@ func NewArtifactAllowlistsPreview(client *client.DatabricksClient) *ArtifactAllo // In Databricks Runtime 13.3 and above, you can add libraries and init scripts // to the `allowlist` in UC so that users can leverage these artifacts on // compute configured with shared access mode. -type ArtifactAllowlistsPreviewAPI struct { - artifactAllowlistsPreviewImpl +type ArtifactAllowlistsAPI struct { + artifactAllowlistsImpl } // Get an artifact allowlist. // // Get the artifact allowlist of a certain artifact type. The caller must be a // metastore admin or have the **MANAGE ALLOWLIST** privilege on the metastore. -func (a *ArtifactAllowlistsPreviewAPI) GetByArtifactType(ctx context.Context, artifactType ArtifactType) (*ArtifactAllowlistInfo, error) { - return a.artifactAllowlistsPreviewImpl.Get(ctx, GetArtifactAllowlistRequest{ +func (a *ArtifactAllowlistsAPI) GetByArtifactType(ctx context.Context, artifactType ArtifactType) (*ArtifactAllowlistInfo, error) { + return a.artifactAllowlistsImpl.Get(ctx, GetArtifactAllowlistRequest{ ArtifactType: artifactType, }) } -type CatalogsPreviewInterface interface { +type CatalogsInterface interface { // Create a catalog. 
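GetByArtifactType above is keyed by the artifact type enum; a sketch, assuming the generated constants include ArtifactTypeInitScript as in the non-preview catalog package and that the client constructor follows the generated pattern.

package main

import (
	"context"
	"log"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

func main() {
	ctx := context.Background()
	allowlists, err := catalogpreview.NewArtifactAllowlistsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Fetches the allowlist for init scripts used on shared-access-mode compute.
	info, err := allowlists.GetByArtifactType(ctx, catalogpreview.ArtifactTypeInitScript)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("allowlist: %+v", info)
}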
// @@ -428,9 +428,9 @@ type CatalogsPreviewInterface interface { Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error) } -func NewCatalogsPreview(client *client.DatabricksClient) *CatalogsPreviewAPI { - return &CatalogsPreviewAPI{ - catalogsPreviewImpl: catalogsPreviewImpl{ +func NewCatalogs(client *client.DatabricksClient) *CatalogsAPI { + return &CatalogsAPI{ + catalogsImpl: catalogsImpl{ client: client, }, } @@ -444,16 +444,16 @@ func NewCatalogsPreview(client *client.DatabricksClient) *CatalogsPreviewAPI { // data centrally across all of the workspaces in a Databricks account. Users in // different workspaces can share access to the same data, depending on // privileges granted centrally in Unity Catalog. -type CatalogsPreviewAPI struct { - catalogsPreviewImpl +type CatalogsAPI struct { + catalogsImpl } // Delete a catalog. // // Deletes the catalog that matches the supplied name. The caller must be a // metastore admin or the owner of the catalog. -func (a *CatalogsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.catalogsPreviewImpl.Delete(ctx, DeleteCatalogRequest{ +func (a *CatalogsAPI) DeleteByName(ctx context.Context, name string) error { + return a.catalogsImpl.Delete(ctx, DeleteCatalogRequest{ Name: name, }) } @@ -463,13 +463,13 @@ func (a *CatalogsPreviewAPI) DeleteByName(ctx context.Context, name string) erro // Gets the specified catalog in a metastore. The caller must be a metastore // admin, the owner of the catalog, or a user that has the **USE_CATALOG** // privilege set for their account. -func (a *CatalogsPreviewAPI) GetByName(ctx context.Context, name string) (*CatalogInfo, error) { - return a.catalogsPreviewImpl.Get(ctx, GetCatalogRequest{ +func (a *CatalogsAPI) GetByName(ctx context.Context, name string) (*CatalogInfo, error) { + return a.catalogsImpl.Get(ctx, GetCatalogRequest{ Name: name, }) } -type ConnectionsPreviewInterface interface { +type ConnectionsInterface interface { // Create a connection. // @@ -514,7 +514,7 @@ type ConnectionsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) - // ConnectionInfoNameToFullNameMap calls [ConnectionsPreviewAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value. + // ConnectionInfoNameToFullNameMap calls [ConnectionsAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value. // // Returns an error if there's more than one [ConnectionInfo] with the same .Name. // @@ -529,9 +529,9 @@ type ConnectionsPreviewInterface interface { Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error) } -func NewConnectionsPreview(client *client.DatabricksClient) *ConnectionsPreviewAPI { - return &ConnectionsPreviewAPI{ - connectionsPreviewImpl: connectionsPreviewImpl{ +func NewConnections(client *client.DatabricksClient) *ConnectionsAPI { + return &ConnectionsAPI{ + connectionsImpl: connectionsImpl{ client: client, }, } @@ -548,15 +548,15 @@ func NewConnectionsPreview(client *client.DatabricksClient) *ConnectionsPreviewA // Users may create different types of connections with each connection having a // unique set of configuration options to support credential management and // other settings. -type ConnectionsPreviewAPI struct { - connectionsPreviewImpl +type ConnectionsAPI struct { + connectionsImpl } // Delete a connection. 
// // Deletes the connection that matches the supplied name. -func (a *ConnectionsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.connectionsPreviewImpl.Delete(ctx, DeleteConnectionRequest{ +func (a *ConnectionsAPI) DeleteByName(ctx context.Context, name string) error { + return a.connectionsImpl.Delete(ctx, DeleteConnectionRequest{ Name: name, }) } @@ -564,20 +564,20 @@ func (a *ConnectionsPreviewAPI) DeleteByName(ctx context.Context, name string) e // Get a connection. // // Gets a connection from its name. -func (a *ConnectionsPreviewAPI) GetByName(ctx context.Context, name string) (*ConnectionInfo, error) { - return a.connectionsPreviewImpl.Get(ctx, GetConnectionRequest{ +func (a *ConnectionsAPI) GetByName(ctx context.Context, name string) (*ConnectionInfo, error) { + return a.connectionsImpl.Get(ctx, GetConnectionRequest{ Name: name, }) } -// ConnectionInfoNameToFullNameMap calls [ConnectionsPreviewAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value. +// ConnectionInfoNameToFullNameMap calls [ConnectionsAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value. // // Returns an error if there's more than one [ConnectionInfo] with the same .Name. // // Note: All [ConnectionInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ConnectionsPreviewAPI) ConnectionInfoNameToFullNameMap(ctx context.Context, request ListConnectionsRequest) (map[string]string, error) { +func (a *ConnectionsAPI) ConnectionInfoNameToFullNameMap(ctx context.Context, request ListConnectionsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -595,7 +595,7 @@ func (a *ConnectionsPreviewAPI) ConnectionInfoNameToFullNameMap(ctx context.Cont return mapping, nil } -type CredentialsPreviewInterface interface { +type CredentialsInterface interface { // Create a credential. // @@ -693,9 +693,9 @@ type CredentialsPreviewInterface interface { ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error) } -func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewAPI { - return &CredentialsPreviewAPI{ - credentialsPreviewImpl: credentialsPreviewImpl{ +func NewCredentials(client *client.DatabricksClient) *CredentialsAPI { + return &CredentialsAPI{ + credentialsImpl: credentialsImpl{ client: client, }, } @@ -709,16 +709,16 @@ func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewA // To create credentials, you must be a Databricks account admin or have the // `CREATE SERVICE CREDENTIAL` privilege. The user who creates the credential // can delegate ownership to another user or group to manage permissions on it. -type CredentialsPreviewAPI struct { - credentialsPreviewImpl +type CredentialsAPI struct { + credentialsImpl } // Delete a credential. // // Deletes a service or storage credential from the metastore. The caller must // be an owner of the credential.
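A sketch of the ByNameArg wrappers defined just below; the credential name is a placeholder and the client constructor is assumed from the generated pattern.

package main

import (
	"context"
	"log"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

func main() {
	ctx := context.Background()
	creds, err := catalogpreview.NewCredentialsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Shorthand for GetCredential(ctx, GetCredentialRequest{NameArg: ...}).
	info, err := creds.GetCredentialByNameArg(ctx, "my-service-credential") // placeholder name
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("credential: %+v", info)
}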
-func (a *CredentialsPreviewAPI) DeleteCredentialByNameArg(ctx context.Context, nameArg string) error { - return a.credentialsPreviewImpl.DeleteCredential(ctx, DeleteCredentialRequest{ +func (a *CredentialsAPI) DeleteCredentialByNameArg(ctx context.Context, nameArg string) error { + return a.credentialsImpl.DeleteCredential(ctx, DeleteCredentialRequest{ NameArg: nameArg, }) } @@ -728,13 +728,13 @@ func (a *CredentialsPreviewAPI) DeleteCredentialByNameArg(ctx context.Context, n // Gets a service or storage credential from the metastore. The caller must be a // metastore admin, the owner of the credential, or have any permission on the // credential. -func (a *CredentialsPreviewAPI) GetCredentialByNameArg(ctx context.Context, nameArg string) (*CredentialInfo, error) { - return a.credentialsPreviewImpl.GetCredential(ctx, GetCredentialRequest{ +func (a *CredentialsAPI) GetCredentialByNameArg(ctx context.Context, nameArg string) (*CredentialInfo, error) { + return a.credentialsImpl.GetCredential(ctx, GetCredentialRequest{ NameArg: nameArg, }) } -type ExternalLocationsPreviewInterface interface { +type ExternalLocationsInterface interface { // Create an external location. // @@ -799,9 +799,9 @@ type ExternalLocationsPreviewInterface interface { Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) } -func NewExternalLocationsPreview(client *client.DatabricksClient) *ExternalLocationsPreviewAPI { - return &ExternalLocationsPreviewAPI{ - externalLocationsPreviewImpl: externalLocationsPreviewImpl{ +func NewExternalLocations(client *client.DatabricksClient) *ExternalLocationsAPI { + return &ExternalLocationsAPI{ + externalLocationsImpl: externalLocationsImpl{ client: client, }, } @@ -820,16 +820,16 @@ func NewExternalLocationsPreview(client *client.DatabricksClient) *ExternalLocat // // To create external locations, you must be a metastore admin or a user with // the **CREATE_EXTERNAL_LOCATION** privilege. -type ExternalLocationsPreviewAPI struct { - externalLocationsPreviewImpl +type ExternalLocationsAPI struct { + externalLocationsImpl } // Delete an external location. // // Deletes the specified external location from the metastore. The caller must // be the owner of the external location. -func (a *ExternalLocationsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.externalLocationsPreviewImpl.Delete(ctx, DeleteExternalLocationRequest{ +func (a *ExternalLocationsAPI) DeleteByName(ctx context.Context, name string) error { + return a.externalLocationsImpl.Delete(ctx, DeleteExternalLocationRequest{ Name: name, }) } @@ -839,13 +839,13 @@ func (a *ExternalLocationsPreviewAPI) DeleteByName(ctx context.Context, name str // Gets an external location from the metastore. The caller must be either a // metastore admin, the owner of the external location, or a user that has some // privilege on the external location. -func (a *ExternalLocationsPreviewAPI) GetByName(ctx context.Context, name string) (*ExternalLocationInfo, error) { - return a.externalLocationsPreviewImpl.Get(ctx, GetExternalLocationRequest{ +func (a *ExternalLocationsAPI) GetByName(ctx context.Context, name string) (*ExternalLocationInfo, error) { + return a.externalLocationsImpl.Get(ctx, GetExternalLocationRequest{ Name: name, }) } -type FunctionsPreviewInterface interface { +type FunctionsInterface interface { // Create a function. // @@ -928,7 +928,7 @@ type FunctionsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. 
 
-type FunctionsPreviewInterface interface {
+type FunctionsInterface interface {
 
 	// Create a function.
 	//
@@ -928,7 +928,7 @@ type FunctionsPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error)
 
-	// FunctionInfoNameToFullNameMap calls [FunctionsPreviewAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value.
+	// FunctionInfoNameToFullNameMap calls [FunctionsAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value.
 	//
 	// Returns an error if there's more than one [FunctionInfo] with the same .Name.
 	//
@@ -950,9 +950,9 @@ type FunctionsPreviewInterface interface {
 	Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error)
 }
 
-func NewFunctionsPreview(client *client.DatabricksClient) *FunctionsPreviewAPI {
-	return &FunctionsPreviewAPI{
-		functionsPreviewImpl: functionsPreviewImpl{
+func NewFunctions(client *client.DatabricksClient) *FunctionsAPI {
+	return &FunctionsAPI{
+		functionsImpl: functionsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -964,8 +964,8 @@ func NewFunctionsPreview(client *client.DatabricksClient) *FunctionsPreviewAPI {
 // invoked wherever a table reference is allowed in a query. In Unity Catalog, a
 // function resides at the same level as a table, so it can be referenced with
 // the form __catalog_name__.__schema_name__.__function_name__.
-type FunctionsPreviewAPI struct {
-	functionsPreviewImpl
+type FunctionsAPI struct {
+	functionsImpl
 }
 
 // Delete a function.
@@ -977,8 +977,8 @@ type FunctionsPreviewAPI struct {
 // Is the owner of the function itself and have both the **USE_CATALOG**
 // privilege on its parent catalog and the **USE_SCHEMA** privilege on its
 // parent schema
-func (a *FunctionsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
-	return a.functionsPreviewImpl.Delete(ctx, DeleteFunctionRequest{
+func (a *FunctionsAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.functionsImpl.Delete(ctx, DeleteFunctionRequest{
 		Name: name,
 	})
 }
@@ -992,20 +992,20 @@ func (a *FunctionsPreviewAPI) DeleteByName(ctx context.Context, name string) err
 // of the function - Have the **USE_CATALOG** privilege on the function's parent
 // catalog, the **USE_SCHEMA** privilege on the function's parent schema, and
 // the **EXECUTE** privilege on the function itself
-func (a *FunctionsPreviewAPI) GetByName(ctx context.Context, name string) (*FunctionInfo, error) {
-	return a.functionsPreviewImpl.Get(ctx, GetFunctionRequest{
+func (a *FunctionsAPI) GetByName(ctx context.Context, name string) (*FunctionInfo, error) {
+	return a.functionsImpl.Get(ctx, GetFunctionRequest{
 		Name: name,
 	})
 }
 
-// FunctionInfoNameToFullNameMap calls [FunctionsPreviewAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value.
+// FunctionInfoNameToFullNameMap calls [FunctionsAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value.
 //
 // Returns an error if there's more than one [FunctionInfo] with the same .Name.
 //
 // Note: All [FunctionInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *FunctionsPreviewAPI) FunctionInfoNameToFullNameMap(ctx context.Context, request ListFunctionsRequest) (map[string]string, error) {
+func (a *FunctionsAPI) FunctionInfoNameToFullNameMap(ctx context.Context, request ListFunctionsRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -1023,7 +1023,7 @@ func (a *FunctionsPreviewAPI) FunctionInfoNameToFullNameMap(ctx context.Context,
 	return mapping, nil
 }
 
-type GrantsPreviewInterface interface {
+type GrantsInterface interface {
 
 	// Get permissions.
 	//
@@ -1051,9 +1051,9 @@ type GrantsPreviewInterface interface {
 	Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error)
 }
 
-func NewGrantsPreview(client *client.DatabricksClient) *GrantsPreviewAPI {
-	return &GrantsPreviewAPI{
-		grantsPreviewImpl: grantsPreviewImpl{
+func NewGrants(client *client.DatabricksClient) *GrantsAPI {
+	return &GrantsAPI{
+		grantsImpl: grantsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -1070,15 +1070,15 @@ func NewGrantsPreview(client *client.DatabricksClient) *GrantsPreviewAPI {
 // automatically grants the privilege to all current and future objects within
 // the catalog. Similarly, privileges granted on a schema are inherited by all
 // current and future objects within that schema.
-type GrantsPreviewAPI struct {
-	grantsPreviewImpl
+type GrantsAPI struct {
+	grantsImpl
 }
 
 // Get permissions.
 //
 // Gets the permissions for a securable.
-func (a *GrantsPreviewAPI) GetBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*PermissionsList, error) {
-	return a.grantsPreviewImpl.Get(ctx, GetGrantRequest{
+func (a *GrantsAPI) GetBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*PermissionsList, error) {
+	return a.grantsImpl.Get(ctx, GetGrantRequest{
 		SecurableType: securableType,
 		FullName: fullName,
 	})
@@ -1087,14 +1087,14 @@ func (a *GrantsPreviewAPI) GetBySecurableTypeAndFullName(ctx context.Context, se
 // Get effective permissions.
 //
 // Gets the effective permissions for a securable.
-func (a *GrantsPreviewAPI) GetEffectiveBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*EffectivePermissionsList, error) {
-	return a.grantsPreviewImpl.GetEffective(ctx, GetEffectiveRequest{
+func (a *GrantsAPI) GetEffectiveBySecurableTypeAndFullName(ctx context.Context, securableType SecurableType, fullName string) (*EffectivePermissionsList, error) {
+	return a.grantsImpl.GetEffective(ctx, GetEffectiveRequest{
 		SecurableType: securableType,
 		FullName: fullName,
 	})
 }
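A hedged usage sketch of the renamed GrantsAPI wrapper above. The SecurableTypeTable constant and the PrivilegeAssignments/Principal/Privileges fields are assumptions based on the public catalog API shape, not values shown in this patch.

package main // hypothetical consumer, not part of this patch

import (
	"context"
	"fmt"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// printGrants fetches the direct permissions on a table and prints each
// principal with its privileges.
func printGrants(ctx context.Context, grants catalogpreview.GrantsInterface, tableFullName string) error {
	perms, err := grants.GetBySecurableTypeAndFullName(ctx, catalogpreview.SecurableTypeTable, tableFullName)
	if err != nil {
		return err
	}
	for _, pa := range perms.PrivilegeAssignments {
		fmt.Println(pa.Principal, pa.Privileges)
	}
	return nil
}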
 
-type MetastoresPreviewInterface interface {
+type MetastoresInterface interface {
 
 	// Create an assignment.
 	//
@@ -1157,7 +1157,7 @@ type MetastoresPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context) ([]MetastoreInfo, error)
 
-	// MetastoreInfoNameToMetastoreIdMap calls [MetastoresPreviewAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value.
+	// MetastoreInfoNameToMetastoreIdMap calls [MetastoresAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value.
 	//
 	// Returns an error if there's more than one [MetastoreInfo] with the same .Name.
 	//
@@ -1166,7 +1166,7 @@ type MetastoresPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error)
 
-	// GetByName calls [MetastoresPreviewAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo].
+	// GetByName calls [MetastoresAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo].
 	//
 	// Returns an error if there's more than one [MetastoreInfo] with the same .Name.
 	//
@@ -1208,9 +1208,9 @@ type MetastoresPreviewInterface interface {
 	UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error
 }
 
-func NewMetastoresPreview(client *client.DatabricksClient) *MetastoresPreviewAPI {
-	return &MetastoresPreviewAPI{
-		metastoresPreviewImpl: metastoresPreviewImpl{
+func NewMetastores(client *client.DatabricksClient) *MetastoresAPI {
+	return &MetastoresAPI{
+		metastoresImpl: metastoresImpl{
 			client: client,
 		},
 	}
 }
 
@@ -1230,15 +1230,15 @@ func NewMetastoresPreview(client *client.DatabricksClient) *MetastoresPreviewAPI
 // workspaces created before Unity Catalog was released. If your workspace
 // includes a legacy Hive metastore, the data in that metastore is available in
 // a catalog named hive_metastore.
-type MetastoresPreviewAPI struct {
-	metastoresPreviewImpl
+type MetastoresAPI struct {
+	metastoresImpl
 }
 
 // Delete a metastore.
 //
 // Deletes a metastore. The caller must be a metastore admin.
-func (a *MetastoresPreviewAPI) DeleteById(ctx context.Context, id string) error {
-	return a.metastoresPreviewImpl.Delete(ctx, DeleteMetastoreRequest{
+func (a *MetastoresAPI) DeleteById(ctx context.Context, id string) error {
+	return a.metastoresImpl.Delete(ctx, DeleteMetastoreRequest{
 		Id: id,
 	})
 }
@@ -1247,20 +1247,20 @@ func (a *MetastoresPreviewAPI) DeleteById(ctx context.Context, id string) error
 //
 // Gets a metastore that matches the supplied ID. The caller must be a metastore
 // admin to retrieve this info.
-func (a *MetastoresPreviewAPI) GetById(ctx context.Context, id string) (*MetastoreInfo, error) {
-	return a.metastoresPreviewImpl.Get(ctx, GetMetastoreRequest{
+func (a *MetastoresAPI) GetById(ctx context.Context, id string) (*MetastoreInfo, error) {
+	return a.metastoresImpl.Get(ctx, GetMetastoreRequest{
 		Id: id,
 	})
 }
 
-// MetastoreInfoNameToMetastoreIdMap calls [MetastoresPreviewAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value.
+// MetastoreInfoNameToMetastoreIdMap calls [MetastoresAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value.
 //
 // Returns an error if there's more than one [MetastoreInfo] with the same .Name.
 //
 // Note: All [MetastoreInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *MetastoresPreviewAPI) MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error) {
+func (a *MetastoresAPI) MetastoreInfoNameToMetastoreIdMap(ctx context.Context) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx)
@@ -1278,14 +1278,14 @@ func (a *MetastoresPreviewAPI) MetastoreInfoNameToMetastoreIdMap(ctx context.Con
 	return mapping, nil
 }
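As a quick illustration of the name-to-id helper just above, a hypothetical consumer snippet (import path and alias assumed; not part of the patch):

package main // hypothetical consumer, not part of this patch

import (
	"context"
	"fmt"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// metastoreID resolves a metastore name to its ID; note that the generated
// helper lists all metastores before building the map.
func metastoreID(ctx context.Context, ms catalogpreview.MetastoresInterface, name string) (string, error) {
	ids, err := ms.MetastoreInfoNameToMetastoreIdMap(ctx)
	if err != nil {
		return "", err
	}
	id, ok := ids[name]
	if !ok {
		return "", fmt.Errorf("metastore %q not found", name)
	}
	return id, nil
}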
 
-// GetByName calls [MetastoresPreviewAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo].
+// GetByName calls [MetastoresAPI.MetastoreInfoNameToMetastoreIdMap] and returns a single [MetastoreInfo].
 //
 // Returns an error if there's more than one [MetastoreInfo] with the same .Name.
 //
 // Note: All [MetastoreInfo] instances are loaded into memory before returning matching by name.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *MetastoresPreviewAPI) GetByName(ctx context.Context, name string) (*MetastoreInfo, error) {
+func (a *MetastoresAPI) GetByName(ctx context.Context, name string) (*MetastoreInfo, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
 	result, err := a.ListAll(ctx)
 	if err != nil {
@@ -1309,13 +1309,13 @@ func (a *MetastoresPreviewAPI) GetByName(ctx context.Context, name string) (*Met
 // Delete an assignment.
 //
 // Deletes a metastore assignment. The caller must be an account administrator.
-func (a *MetastoresPreviewAPI) UnassignByWorkspaceId(ctx context.Context, workspaceId int64) error {
-	return a.metastoresPreviewImpl.Unassign(ctx, UnassignRequest{
+func (a *MetastoresAPI) UnassignByWorkspaceId(ctx context.Context, workspaceId int64) error {
+	return a.metastoresImpl.Unassign(ctx, UnassignRequest{
 		WorkspaceId: workspaceId,
 	})
 }
 
-type ModelVersionsPreviewInterface interface {
+type ModelVersionsInterface interface {
 
 	// Delete a Model Version.
 	//
@@ -1447,9 +1447,9 @@ type ModelVersionsPreviewInterface interface {
 	Update(ctx context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error)
 }
 
-func NewModelVersionsPreview(client *client.DatabricksClient) *ModelVersionsPreviewAPI {
-	return &ModelVersionsPreviewAPI{
-		modelVersionsPreviewImpl: modelVersionsPreviewImpl{
+func NewModelVersions(client *client.DatabricksClient) *ModelVersionsAPI {
+	return &ModelVersionsAPI{
+		modelVersionsImpl: modelVersionsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -1462,8 +1462,8 @@ func NewModelVersionsPreview(client *client.DatabricksClient) *ModelVersionsPrev
 // This API reference documents the REST endpoints for managing model versions
 // in Unity Catalog. For more details, see the [registered models API
 // docs](/api/workspace/registeredmodels).
-type ModelVersionsPreviewAPI struct {
-	modelVersionsPreviewImpl
+type ModelVersionsAPI struct {
+	modelVersionsImpl
 }
 
 // Delete a Model Version.
@@ -1475,8 +1475,8 @@ type ModelVersionsPreviewAPI struct {
 // model. For the latter case, the caller must also be the owner or have the
 // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
 // privilege on the parent schema.
-func (a *ModelVersionsPreviewAPI) DeleteByFullNameAndVersion(ctx context.Context, fullName string, version int) error {
-	return a.modelVersionsPreviewImpl.Delete(ctx, DeleteModelVersionRequest{
+func (a *ModelVersionsAPI) DeleteByFullNameAndVersion(ctx context.Context, fullName string, version int) error {
+	return a.modelVersionsImpl.Delete(ctx, DeleteModelVersionRequest{
 		FullName: fullName,
 		Version: version,
 	})
@@ -1490,8 +1490,8 @@ func (a *ModelVersionsPreviewAPI) DeleteByFullNameAndVersion(ctx context.Context
 // privilege on) the parent registered model. For the latter case, the caller
 // must also be the owner or have the **USE_CATALOG** privilege on the parent
 // catalog and the **USE_SCHEMA** privilege on the parent schema.
-func (a *ModelVersionsPreviewAPI) GetByFullNameAndVersion(ctx context.Context, fullName string, version int) (*ModelVersionInfo, error) {
-	return a.modelVersionsPreviewImpl.Get(ctx, GetModelVersionRequest{
+func (a *ModelVersionsAPI) GetByFullNameAndVersion(ctx context.Context, fullName string, version int) (*ModelVersionInfo, error) {
+	return a.modelVersionsImpl.Get(ctx, GetModelVersionRequest{
 		FullName: fullName,
 		Version: version,
 	})
@@ -1505,8 +1505,8 @@ func (a *ModelVersionsPreviewAPI) GetByFullNameAndVersion(ctx context.Context, f
 // privilege on) the registered model. For the latter case, the caller must also
 // be the owner or have the **USE_CATALOG** privilege on the parent catalog and
 // the **USE_SCHEMA** privilege on the parent schema.
-func (a *ModelVersionsPreviewAPI) GetByAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) (*ModelVersionInfo, error) {
-	return a.modelVersionsPreviewImpl.GetByAlias(ctx, GetByAliasRequest{
+func (a *ModelVersionsAPI) GetByAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) (*ModelVersionInfo, error) {
+	return a.modelVersionsImpl.GetByAlias(ctx, GetByAliasRequest{
 		FullName: fullName,
 		Alias: alias,
 	})
@@ -1527,13 +1527,13 @@ func (a *ModelVersionsPreviewAPI) GetByAliasByFullNameAndAlias(ctx context.Conte
 //
 // There is no guarantee of a specific ordering of the elements in the response.
 // The elements in the response will not contain any aliases or tags.
-func (a *ModelVersionsPreviewAPI) ListByFullName(ctx context.Context, fullName string) (*ListModelVersionsResponse, error) {
-	return a.modelVersionsPreviewImpl.internalList(ctx, ListModelVersionsRequest{
+func (a *ModelVersionsAPI) ListByFullName(ctx context.Context, fullName string) (*ListModelVersionsResponse, error) {
+	return a.modelVersionsImpl.internalList(ctx, ListModelVersionsRequest{
 		FullName: fullName,
 	})
 }
 
-type OnlineTablesPreviewInterface interface {
+type OnlineTablesInterface interface {
 
 	// Create an Online Table.
 	//
@@ -1565,9 +1565,9 @@ type OnlineTablesPreviewInterface interface {
 	GetByName(ctx context.Context, name string) (*OnlineTable, error)
 }
 
-func NewOnlineTablesPreview(client *client.DatabricksClient) *OnlineTablesPreviewAPI {
-	return &OnlineTablesPreviewAPI{
-		onlineTablesPreviewImpl: onlineTablesPreviewImpl{
+func NewOnlineTables(client *client.DatabricksClient) *OnlineTablesAPI {
+	return &OnlineTablesAPI{
+		onlineTablesImpl: onlineTablesImpl{
 			client: client,
 		},
 	}
}
 
@@ -1575,8 +1575,8 @@ func NewOnlineTablesPreview(client *client.DatabricksClient) *OnlineTablesPrevie
 // Online tables provide lower latency and higher QPS access to data from Delta
 // tables.
-type OnlineTablesPreviewAPI struct {
-	onlineTablesPreviewImpl
+type OnlineTablesAPI struct {
+	onlineTablesImpl
 }
 
 // Delete an Online Table.
@@ -1584,8 +1584,8 @@ type OnlineTablesPreviewAPI struct {
 // Delete an online table. Warning: This will delete all the data in the online
 // table. If the source Delta table was deleted or modified since this Online
 // Table was created, this will lose the data forever!
-func (a *OnlineTablesPreviewAPI) DeleteByName(ctx context.Context, name string) error {
-	return a.onlineTablesPreviewImpl.Delete(ctx, DeleteOnlineTableRequest{
+func (a *OnlineTablesAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.onlineTablesImpl.Delete(ctx, DeleteOnlineTableRequest{
 		Name: name,
 	})
 }
@@ -1593,13 +1593,13 @@ func (a *OnlineTablesPreviewAPI) DeleteByName(ctx context.Context, name string)
 // Get an Online Table.
 //
 // Get information about an existing online table and its status.
-func (a *OnlineTablesPreviewAPI) GetByName(ctx context.Context, name string) (*OnlineTable, error) {
-	return a.onlineTablesPreviewImpl.Get(ctx, GetOnlineTableRequest{
+func (a *OnlineTablesAPI) GetByName(ctx context.Context, name string) (*OnlineTable, error) {
+	return a.onlineTablesImpl.Get(ctx, GetOnlineTableRequest{
 		Name: name,
 	})
 }
 
-type QualityMonitorsPreviewInterface interface {
+type QualityMonitorsInterface interface {
 
 	// Cancel refresh.
 	//
@@ -1803,9 +1803,9 @@ type QualityMonitorsPreviewInterface interface {
 	Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error)
 }
 
-func NewQualityMonitorsPreview(client *client.DatabricksClient) *QualityMonitorsPreviewAPI {
-	return &QualityMonitorsPreviewAPI{
-		qualityMonitorsPreviewImpl: qualityMonitorsPreviewImpl{
+func NewQualityMonitors(client *client.DatabricksClient) *QualityMonitorsAPI {
+	return &QualityMonitorsAPI{
+		qualityMonitorsImpl: qualityMonitorsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -1819,8 +1819,8 @@ func NewQualityMonitorsPreview(client *client.DatabricksClient) *QualityMonitors
 // parent schema or parent catalog). Viewing the dashboard, computed metrics, or
 // monitor configuration only requires the user to have **SELECT** privileges on
 // the table (along with **USE_SCHEMA** and **USE_CATALOG**).
-type QualityMonitorsPreviewAPI struct {
-	qualityMonitorsPreviewImpl
+type QualityMonitorsAPI struct {
+	qualityMonitorsImpl
 }
 
 // Delete a table monitor.
@@ -1838,8 +1838,8 @@ type QualityMonitorsPreviewAPI struct {
 //
 // Note that the metric tables and dashboard will not be deleted as part of this
 // call; those assets must be manually cleaned up (if desired).
-func (a *QualityMonitorsPreviewAPI) DeleteByTableName(ctx context.Context, tableName string) error {
-	return a.qualityMonitorsPreviewImpl.Delete(ctx, DeleteQualityMonitorRequest{
+func (a *QualityMonitorsAPI) DeleteByTableName(ctx context.Context, tableName string) error {
+	return a.qualityMonitorsImpl.Delete(ctx, DeleteQualityMonitorRequest{
 		TableName: tableName,
 	})
 }
@@ -1858,8 +1858,8 @@ func (a *QualityMonitorsPreviewAPI) DeleteByTableName(ctx context.Context, table
 // information on assets created by the monitor. Some information (e.g.,
 // dashboard) may be filtered out if the caller is in a different workspace than
 // where the monitor was created.
-func (a *QualityMonitorsPreviewAPI) GetByTableName(ctx context.Context, tableName string) (*MonitorInfo, error) {
-	return a.qualityMonitorsPreviewImpl.Get(ctx, GetQualityMonitorRequest{
+func (a *QualityMonitorsAPI) GetByTableName(ctx context.Context, tableName string) (*MonitorInfo, error) {
+	return a.qualityMonitorsImpl.Get(ctx, GetQualityMonitorRequest{
 		TableName: tableName,
 	})
 }
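A brief hypothetical-consumer sketch of the renamed GetByTableName wrapper above (import path and alias assumed; not part of the patch):

package main // hypothetical consumer, not part of this patch

import (
	"context"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// monitorExists probes a table monitor; per the docs above the caller needs
// SELECT plus USE_SCHEMA/USE_CATALOG on the table.
func monitorExists(ctx context.Context, qm catalogpreview.QualityMonitorsInterface, tableName string) (bool, error) {
	if _, err := qm.GetByTableName(ctx, tableName); err != nil {
		return false, err // a not-found error here may simply mean "no monitor"
	}
	return true, nil
}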
@@ -1876,8 +1876,8 @@ func (a *QualityMonitorsPreviewAPI) GetByTableName(ctx context.Context, tableNam
 //
 // Additionally, the call must be made from the workspace where the monitor was
 // created.
-func (a *QualityMonitorsPreviewAPI) GetRefreshByTableNameAndRefreshId(ctx context.Context, tableName string, refreshId string) (*MonitorRefreshInfo, error) {
-	return a.qualityMonitorsPreviewImpl.GetRefresh(ctx, GetRefreshRequest{
+func (a *QualityMonitorsAPI) GetRefreshByTableNameAndRefreshId(ctx context.Context, tableName string, refreshId string) (*MonitorRefreshInfo, error) {
+	return a.qualityMonitorsImpl.GetRefresh(ctx, GetRefreshRequest{
 		TableName: tableName,
 		RefreshId: refreshId,
 	})
@@ -1896,13 +1896,13 @@ func (a *QualityMonitorsPreviewAPI) GetRefreshByTableNameAndRefreshId(ctx contex
 //
 // Additionally, the call must be made from the workspace where the monitor was
 // created.
-func (a *QualityMonitorsPreviewAPI) ListRefreshesByTableName(ctx context.Context, tableName string) (*MonitorRefreshListResponse, error) {
-	return a.qualityMonitorsPreviewImpl.ListRefreshes(ctx, ListRefreshesRequest{
+func (a *QualityMonitorsAPI) ListRefreshesByTableName(ctx context.Context, tableName string) (*MonitorRefreshListResponse, error) {
+	return a.qualityMonitorsImpl.ListRefreshes(ctx, ListRefreshesRequest{
 		TableName: tableName,
 	})
 }
 
-type RegisteredModelsPreviewInterface interface {
+type RegisteredModelsInterface interface {
 
 	// Create a Registered Model.
 	//
@@ -2018,7 +2018,7 @@ type RegisteredModelsPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error)
 
-	// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsPreviewAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value.
+	// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value.
 	//
 	// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
 	//
@@ -2027,7 +2027,7 @@ type RegisteredModelsPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	RegisteredModelInfoNameToFullNameMap(ctx context.Context, request ListRegisteredModelsRequest) (map[string]string, error)
 
-	// GetByName calls [RegisteredModelsPreviewAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo].
+	// GetByName calls [RegisteredModelsAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo].
 	//
 	// Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
 	//
@@ -2060,9 +2060,9 @@ type RegisteredModelsPreviewInterface interface {
 	Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error)
 }
 
-func NewRegisteredModelsPreview(client *client.DatabricksClient) *RegisteredModelsPreviewAPI {
-	return &RegisteredModelsPreviewAPI{
-		registeredModelsPreviewImpl: registeredModelsPreviewImpl{
+func NewRegisteredModels(client *client.DatabricksClient) *RegisteredModelsAPI {
+	return &RegisteredModelsAPI{
+		registeredModelsImpl: registeredModelsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2097,8 +2097,8 @@ func NewRegisteredModelsPreview(client *client.DatabricksClient) *RegisteredMode
 // Note: The securable type for models is "FUNCTION". When using REST APIs (e.g.
 // tagging, grants) that specify a securable type, use "FUNCTION" as the
 // securable type.
-type RegisteredModelsPreviewAPI struct {
-	registeredModelsPreviewImpl
+type RegisteredModelsAPI struct {
+	registeredModelsImpl
 }
 
 // Delete a Registered Model.
@@ -2110,8 +2110,8 @@ type RegisteredModelsPreviewAPI struct {
 // the latter case, the caller must also be the owner or have the
 // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
 // privilege on the parent schema.
-func (a *RegisteredModelsPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error {
-	return a.registeredModelsPreviewImpl.Delete(ctx, DeleteRegisteredModelRequest{
+func (a *RegisteredModelsAPI) DeleteByFullName(ctx context.Context, fullName string) error {
+	return a.registeredModelsImpl.Delete(ctx, DeleteRegisteredModelRequest{
 		FullName: fullName,
 	})
 }
@@ -2124,8 +2124,8 @@ func (a *RegisteredModelsPreviewAPI) DeleteByFullName(ctx context.Context, fullN
 // the latter case, the caller must also be the owner or have the
 // **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
 // privilege on the parent schema.
-func (a *RegisteredModelsPreviewAPI) DeleteAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) error {
-	return a.registeredModelsPreviewImpl.DeleteAlias(ctx, DeleteAliasRequest{
+func (a *RegisteredModelsAPI) DeleteAliasByFullNameAndAlias(ctx context.Context, fullName string, alias string) error {
+	return a.registeredModelsImpl.DeleteAlias(ctx, DeleteAliasRequest{
 		FullName: fullName,
 		Alias: alias,
 	})
@@ -2139,20 +2139,20 @@ func (a *RegisteredModelsPreviewAPI) DeleteAliasByFullNameAndAlias(ctx context.C
 // privilege on) the registered model. For the latter case, the caller must also
 // be the owner or have the **USE_CATALOG** privilege on the parent catalog and
 // the **USE_SCHEMA** privilege on the parent schema.
-func (a *RegisteredModelsPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*RegisteredModelInfo, error) {
-	return a.registeredModelsPreviewImpl.Get(ctx, GetRegisteredModelRequest{
+func (a *RegisteredModelsAPI) GetByFullName(ctx context.Context, fullName string) (*RegisteredModelInfo, error) {
+	return a.registeredModelsImpl.Get(ctx, GetRegisteredModelRequest{
 		FullName: fullName,
 	})
 }
 
-// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsPreviewAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value.
+// RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value.
 //
 // Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
 //
 // Note: All [RegisteredModelInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *RegisteredModelsPreviewAPI) RegisteredModelInfoNameToFullNameMap(ctx context.Context, request ListRegisteredModelsRequest) (map[string]string, error) {
+func (a *RegisteredModelsAPI) RegisteredModelInfoNameToFullNameMap(ctx context.Context, request ListRegisteredModelsRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -2170,14 +2170,14 @@ func (a *RegisteredModelsPreviewAPI) RegisteredModelInfoNameToFullNameMap(ctx co
 	return mapping, nil
 }
 
-// GetByName calls [RegisteredModelsPreviewAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo].
+// GetByName calls [RegisteredModelsAPI.RegisteredModelInfoNameToFullNameMap] and returns a single [RegisteredModelInfo].
 //
 // Returns an error if there's more than one [RegisteredModelInfo] with the same .Name.
 //
 // Note: All [RegisteredModelInfo] instances are loaded into memory before returning matching by name.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *RegisteredModelsPreviewAPI) GetByName(ctx context.Context, name string) (*RegisteredModelInfo, error) {
+func (a *RegisteredModelsAPI) GetByName(ctx context.Context, name string) (*RegisteredModelInfo, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
 	result, err := a.ListAll(ctx, ListRegisteredModelsRequest{})
 	if err != nil {
@@ -2198,7 +2198,7 @@ func (a *RegisteredModelsPreviewAPI) GetByName(ctx context.Context, name string)
 	return &alternatives[0], nil
 }
 
-type ResourceQuotasPreviewInterface interface {
+type ResourceQuotasInterface interface {
 
 	// Get information for a single resource quota.
 	//
@@ -2235,9 +2235,9 @@ type ResourceQuotasPreviewInterface interface {
 	ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error)
 }
 
-func NewResourceQuotasPreview(client *client.DatabricksClient) *ResourceQuotasPreviewAPI {
-	return &ResourceQuotasPreviewAPI{
-		resourceQuotasPreviewImpl: resourceQuotasPreviewImpl{
+func NewResourceQuotas(client *client.DatabricksClient) *ResourceQuotasAPI {
+	return &ResourceQuotasAPI{
+		resourceQuotasImpl: resourceQuotasImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2251,8 +2251,8 @@ func NewResourceQuotasPreview(client *client.DatabricksClient) *ResourceQuotasPr
 // Catalog documentation].
 //
 // [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas
-type ResourceQuotasPreviewAPI struct {
-	resourceQuotasPreviewImpl
+type ResourceQuotasAPI struct {
+	resourceQuotasImpl
 }
 
 // Get information for a single resource quota.
@@ -2261,15 +2261,15 @@ type ResourceQuotasPreviewAPI struct {
 // defined as a child-parent pair. This API also refreshes the quota count if it
 // is out of date. Refreshes are triggered asynchronously. The updated count
 // might not be returned in the first call.
-func (a *ResourceQuotasPreviewAPI) GetQuotaByParentSecurableTypeAndParentFullNameAndQuotaName(ctx context.Context, parentSecurableType string, parentFullName string, quotaName string) (*GetQuotaResponse, error) {
-	return a.resourceQuotasPreviewImpl.GetQuota(ctx, GetQuotaRequest{
+func (a *ResourceQuotasAPI) GetQuotaByParentSecurableTypeAndParentFullNameAndQuotaName(ctx context.Context, parentSecurableType string, parentFullName string, quotaName string) (*GetQuotaResponse, error) {
+	return a.resourceQuotasImpl.GetQuota(ctx, GetQuotaRequest{
 		ParentSecurableType: parentSecurableType,
 		ParentFullName: parentFullName,
 		QuotaName: quotaName,
 	})
 }
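A hedged sketch of the long-form quota wrapper above. The "schema" securable type, the "table-quota" name, and the QuotaInfo field are placeholder assumptions, not values taken from this patch.

package main // hypothetical consumer, not part of this patch

import (
	"context"
	"fmt"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// showQuota reads a single quota for a schema; refreshes are asynchronous, so
// the returned count may lag (see the doc comment above).
func showQuota(ctx context.Context, rq catalogpreview.ResourceQuotasInterface, schemaFullName string) error {
	resp, err := rq.GetQuotaByParentSecurableTypeAndParentFullNameAndQuotaName(ctx, "schema", schemaFullName, "table-quota")
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", resp.QuotaInfo) // QuotaInfo field assumed from GetQuotaResponse's REST shape
	return nil
}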
 
-type SchemasPreviewInterface interface {
+type SchemasInterface interface {
 
 	// Create a schema.
 	//
@@ -2326,7 +2326,7 @@ type SchemasPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error)
 
-	// SchemaInfoNameToFullNameMap calls [SchemasPreviewAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value.
+	// SchemaInfoNameToFullNameMap calls [SchemasAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value.
 	//
 	// Returns an error if there's more than one [SchemaInfo] with the same .Name.
 	//
@@ -2335,7 +2335,7 @@ type SchemasPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error)
 
-	// GetByName calls [SchemasPreviewAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo].
+	// GetByName calls [SchemasAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo].
 	//
 	// Returns an error if there's more than one [SchemaInfo] with the same .Name.
 	//
@@ -2354,9 +2354,9 @@ type SchemasPreviewInterface interface {
 	Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error)
 }
 
-func NewSchemasPreview(client *client.DatabricksClient) *SchemasPreviewAPI {
-	return &SchemasPreviewAPI{
-		schemasPreviewImpl: schemasPreviewImpl{
+func NewSchemas(client *client.DatabricksClient) *SchemasAPI {
+	return &SchemasAPI{
+		schemasImpl: schemasImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2367,16 +2367,16 @@ func NewSchemasPreview(client *client.DatabricksClient) *SchemasPreviewAPI {
 // access (or list) a table or view in a schema, users must have the USE_SCHEMA
 // data permission on the schema and its parent catalog, and they must have the
 // SELECT permission on the table or view.
-type SchemasPreviewAPI struct {
-	schemasPreviewImpl
+type SchemasAPI struct {
+	schemasImpl
 }
 
 // Delete a schema.
 //
 // Deletes the specified schema from the parent catalog. The caller must be the
 // owner of the schema or an owner of the parent catalog.
-func (a *SchemasPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error {
-	return a.schemasPreviewImpl.Delete(ctx, DeleteSchemaRequest{
+func (a *SchemasAPI) DeleteByFullName(ctx context.Context, fullName string) error {
+	return a.schemasImpl.Delete(ctx, DeleteSchemaRequest{
 		FullName: fullName,
 	})
 }
@@ -2386,20 +2386,20 @@ func (a *SchemasPreviewAPI) DeleteByFullName(ctx context.Context, fullName strin
 // Get a schema.
 //
 // Gets the specified schema within the metastore. The caller must be a
 // metastore admin, the owner of the schema, or a user that has the
 // **USE_SCHEMA** privilege on the schema.
-func (a *SchemasPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*SchemaInfo, error) {
-	return a.schemasPreviewImpl.Get(ctx, GetSchemaRequest{
+func (a *SchemasAPI) GetByFullName(ctx context.Context, fullName string) (*SchemaInfo, error) {
+	return a.schemasImpl.Get(ctx, GetSchemaRequest{
 		FullName: fullName,
 	})
 }
 
-// SchemaInfoNameToFullNameMap calls [SchemasPreviewAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value.
+// SchemaInfoNameToFullNameMap calls [SchemasAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value.
 //
 // Returns an error if there's more than one [SchemaInfo] with the same .Name.
 //
 // Note: All [SchemaInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *SchemasPreviewAPI) SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error) {
+func (a *SchemasAPI) SchemaInfoNameToFullNameMap(ctx context.Context, request ListSchemasRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -2417,14 +2417,14 @@ func (a *SchemasPreviewAPI) SchemaInfoNameToFullNameMap(ctx context.Context, req
 	return mapping, nil
 }
 
-// GetByName calls [SchemasPreviewAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo].
+// GetByName calls [SchemasAPI.SchemaInfoNameToFullNameMap] and returns a single [SchemaInfo].
 //
 // Returns an error if there's more than one [SchemaInfo] with the same .Name.
 //
 // Note: All [SchemaInfo] instances are loaded into memory before returning matching by name.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *SchemasPreviewAPI) GetByName(ctx context.Context, name string) (*SchemaInfo, error) {
+func (a *SchemasAPI) GetByName(ctx context.Context, name string) (*SchemaInfo, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
 	result, err := a.ListAll(ctx, ListSchemasRequest{})
 	if err != nil {
@@ -2445,7 +2445,7 @@ func (a *SchemasPreviewAPI) GetByName(ctx context.Context, name string) (*Schema
 	return &alternatives[0], nil
 }
 
-type StorageCredentialsPreviewInterface interface {
+type StorageCredentialsInterface interface {
 
 	// Create a storage credential.
 	//
@@ -2500,7 +2500,7 @@ type StorageCredentialsPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error)
 
-	// StorageCredentialInfoNameToIdMap calls [StorageCredentialsPreviewAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value.
+	// StorageCredentialInfoNameToIdMap calls [StorageCredentialsAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value.
 	//
 	// Returns an error if there's more than one [StorageCredentialInfo] with the same .Name.
 	//
@@ -2531,9 +2531,9 @@ type StorageCredentialsPreviewInterface interface {
 	Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error)
 }
 
-func NewStorageCredentialsPreview(client *client.DatabricksClient) *StorageCredentialsPreviewAPI {
-	return &StorageCredentialsPreviewAPI{
-		storageCredentialsPreviewImpl: storageCredentialsPreviewImpl{
+func NewStorageCredentials(client *client.DatabricksClient) *StorageCredentialsAPI {
+	return &StorageCredentialsAPI{
+		storageCredentialsImpl: storageCredentialsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2552,16 +2552,16 @@ func NewStorageCredentialsPreview(client *client.DatabricksClient) *StorageCrede
 // To create storage credentials, you must be a Databricks account admin. The
 // account admin who creates the storage credential can delegate ownership to
 // another user or group to manage permissions on it.
-type StorageCredentialsPreviewAPI struct {
-	storageCredentialsPreviewImpl
+type StorageCredentialsAPI struct {
+	storageCredentialsImpl
 }
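For illustration, a small sketch of the SchemasAPI lookup helpers shown above (hypothetical consumer code; import path and alias assumed):

package main // hypothetical consumer, not part of this patch

import (
	"context"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// schemaFullName resolves a schema by its short name. Per the generated doc
// comment, GetByName lists every schema first, so prefer GetByFullName when
// the catalog-qualified name is already known.
func schemaFullName(ctx context.Context, schemas catalogpreview.SchemasInterface, name string) (string, error) {
	info, err := schemas.GetByName(ctx, name)
	if err != nil {
		return "", err
	}
	return info.FullName, nil
}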
 
 // Delete a credential.
 //
 // Deletes a storage credential from the metastore. The caller must be an owner
 // of the storage credential.
-func (a *StorageCredentialsPreviewAPI) DeleteByName(ctx context.Context, name string) error {
-	return a.storageCredentialsPreviewImpl.Delete(ctx, DeleteStorageCredentialRequest{
+func (a *StorageCredentialsAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.storageCredentialsImpl.Delete(ctx, DeleteStorageCredentialRequest{
 		Name: name,
 	})
 }
@@ -2571,20 +2571,20 @@ func (a *StorageCredentialsPreviewAPI) DeleteByName(ctx context.Context, name st
 // Get a credential.
 //
 // Gets a storage credential from the metastore. The caller must be a metastore
 // admin, the owner of the storage credential, or have some permission on the
 // storage credential.
-func (a *StorageCredentialsPreviewAPI) GetByName(ctx context.Context, name string) (*StorageCredentialInfo, error) {
-	return a.storageCredentialsPreviewImpl.Get(ctx, GetStorageCredentialRequest{
+func (a *StorageCredentialsAPI) GetByName(ctx context.Context, name string) (*StorageCredentialInfo, error) {
+	return a.storageCredentialsImpl.Get(ctx, GetStorageCredentialRequest{
 		Name: name,
 	})
 }
 
-// StorageCredentialInfoNameToIdMap calls [StorageCredentialsPreviewAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value.
+// StorageCredentialInfoNameToIdMap calls [StorageCredentialsAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value.
 //
 // Returns an error if there's more than one [StorageCredentialInfo] with the same .Name.
 //
 // Note: All [StorageCredentialInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *StorageCredentialsPreviewAPI) StorageCredentialInfoNameToIdMap(ctx context.Context, request ListStorageCredentialsRequest) (map[string]string, error) {
+func (a *StorageCredentialsAPI) StorageCredentialInfoNameToIdMap(ctx context.Context, request ListStorageCredentialsRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -2602,7 +2602,7 @@ func (a *StorageCredentialsPreviewAPI) StorageCredentialInfoNameToIdMap(ctx cont
 	return mapping, nil
 }
 
-type SystemSchemasPreviewInterface interface {
+type SystemSchemasInterface interface {
 
 	// Disable a system schema.
 	//
@@ -2645,9 +2645,9 @@ type SystemSchemasPreviewInterface interface {
 	ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error)
 }
 
-func NewSystemSchemasPreview(client *client.DatabricksClient) *SystemSchemasPreviewAPI {
-	return &SystemSchemasPreviewAPI{
-		systemSchemasPreviewImpl: systemSchemasPreviewImpl{
+func NewSystemSchemas(client *client.DatabricksClient) *SystemSchemasAPI {
+	return &SystemSchemasAPI{
+		systemSchemasImpl: systemSchemasImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2656,16 +2656,16 @@ func NewSystemSchemasPreview(client *client.DatabricksClient) *SystemSchemasPrev
 // A system schema is a schema that lives within the system catalog. A system
 // schema may contain information about customer usage of Unity Catalog such as
 // audit-logs, billing-logs, lineage information, etc.
-type SystemSchemasPreviewAPI struct {
-	systemSchemasPreviewImpl
+type SystemSchemasAPI struct {
+	systemSchemasImpl
 }
 
 // Disable a system schema.
 //
 // Disables the system schema and removes it from the system catalog. The caller
 // must be an account admin or a metastore admin.
-func (a *SystemSchemasPreviewAPI) DisableByMetastoreIdAndSchemaName(ctx context.Context, metastoreId string, schemaName string) error {
-	return a.systemSchemasPreviewImpl.Disable(ctx, DisableRequest{
+func (a *SystemSchemasAPI) DisableByMetastoreIdAndSchemaName(ctx context.Context, metastoreId string, schemaName string) error {
+	return a.systemSchemasImpl.Disable(ctx, DisableRequest{
 		MetastoreId: metastoreId,
 		SchemaName: schemaName,
 	})
@@ -2675,13 +2675,13 @@ func (a *SystemSchemasPreviewAPI) DisableByMetastoreIdAndSchemaName(ctx context.
 //
 // Gets an array of system schemas for a metastore. The caller must be an
 // account admin or a metastore admin.
-func (a *SystemSchemasPreviewAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error) {
-	return a.systemSchemasPreviewImpl.internalList(ctx, ListSystemSchemasRequest{
+func (a *SystemSchemasAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error) {
+	return a.systemSchemasImpl.internalList(ctx, ListSystemSchemasRequest{
 		MetastoreId: metastoreId,
 	})
 }
 
-type TableConstraintsPreviewInterface interface {
+type TableConstraintsInterface interface {
 
 	// Create a table constraint.
 	//
@@ -2724,9 +2724,9 @@ type TableConstraintsPreviewInterface interface {
 	DeleteByFullName(ctx context.Context, fullName string) error
 }
 
-func NewTableConstraintsPreview(client *client.DatabricksClient) *TableConstraintsPreviewAPI {
-	return &TableConstraintsPreviewAPI{
-		tableConstraintsPreviewImpl: tableConstraintsPreviewImpl{
+func NewTableConstraints(client *client.DatabricksClient) *TableConstraintsAPI {
+	return &TableConstraintsAPI{
+		tableConstraintsImpl: tableConstraintsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2745,8 +2745,8 @@ func NewTableConstraintsPreview(client *client.DatabricksClient) *TableConstrain
 // You can declare primary keys and foreign keys as part of the table
 // specification during table creation. You can also add or drop constraints on
 // existing tables.
-type TableConstraintsPreviewAPI struct {
-	tableConstraintsPreviewImpl
+type TableConstraintsAPI struct {
+	tableConstraintsImpl
 }
 
 // Delete a table constraint.
@@ -2760,13 +2760,13 @@ type TableConstraintsPreviewAPI struct {
 // the user must have the following permissions on all of the child tables: the
 // **USE_CATALOG** privilege on the table's catalog, the **USE_SCHEMA**
 // privilege on the table's schema, and be the owner of the table.
-func (a *TableConstraintsPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error {
-	return a.tableConstraintsPreviewImpl.Delete(ctx, DeleteTableConstraintRequest{
+func (a *TableConstraintsAPI) DeleteByFullName(ctx context.Context, fullName string) error {
+	return a.tableConstraintsImpl.Delete(ctx, DeleteTableConstraintRequest{
 		FullName: fullName,
 	})
 }
 
-type TablesPreviewInterface interface {
+type TablesInterface interface {
 
 	// Delete a table.
 	//
@@ -2856,7 +2856,7 @@ type TablesPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error)
 
-	// TableInfoNameToTableIdMap calls [TablesPreviewAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value.
+	// TableInfoNameToTableIdMap calls [TablesAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value.
 	//
 	// Returns an error if there's more than one [TableInfo] with the same .Name.
 	//
@@ -2865,7 +2865,7 @@ type TablesPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error)
 
-	// GetByName calls [TablesPreviewAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo].
+	// GetByName calls [TablesAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo].
 	//
 	// Returns an error if there's more than one [TableInfo] with the same .Name.
 	//
@@ -2918,9 +2918,9 @@ type TablesPreviewInterface interface {
 	Update(ctx context.Context, request UpdateTableRequest) error
 }
 
-func NewTablesPreview(client *client.DatabricksClient) *TablesPreviewAPI {
-	return &TablesPreviewAPI{
-		tablesPreviewImpl: tablesPreviewImpl{
+func NewTables(client *client.DatabricksClient) *TablesAPI {
+	return &TablesAPI{
+		tablesImpl: tablesImpl{
 			client: client,
 		},
 	}
 }
 
@@ -2936,8 +2936,8 @@ func NewTablesPreview(client *client.DatabricksClient) *TablesPreviewAPI {
 //
 // A table can be managed or external. From an API perspective, a __VIEW__ is a
 // particular kind of table (rather than a managed or external table).
-type TablesPreviewAPI struct {
-	tablesPreviewImpl
+type TablesAPI struct {
+	tablesImpl
 }
 
 // Delete a table.
@@ -2947,8 +2947,8 @@ type TablesPreviewAPI struct {
 // parent catalog and be the owner of the parent schema, or be the owner of the
 // table and have the **USE_CATALOG** privilege on the parent catalog and the
 // **USE_SCHEMA** privilege on the parent schema.
-func (a *TablesPreviewAPI) DeleteByFullName(ctx context.Context, fullName string) error {
-	return a.tablesPreviewImpl.Delete(ctx, DeleteTableRequest{
+func (a *TablesAPI) DeleteByFullName(ctx context.Context, fullName string) error {
+	return a.tablesImpl.Delete(ctx, DeleteTableRequest{
 		FullName: fullName,
 	})
 }
@@ -2963,8 +2963,8 @@ func (a *TablesPreviewAPI) DeleteByFullName(ctx context.Context, fullName string
 // privilege on the parent schema, and either be the table owner or have the
 // SELECT privilege on the table. * Have BROWSE privilege on the parent catalog
 // * Have BROWSE privilege on the parent schema.
-func (a *TablesPreviewAPI) ExistsByFullName(ctx context.Context, fullName string) (*TableExistsResponse, error) {
-	return a.tablesPreviewImpl.Exists(ctx, ExistsRequest{
+func (a *TablesAPI) ExistsByFullName(ctx context.Context, fullName string) (*TableExistsResponse, error) {
+	return a.tablesImpl.Exists(ctx, ExistsRequest{
 		FullName: fullName,
 	})
 }
@@ -2978,20 +2978,20 @@ func (a *TablesPreviewAPI) ExistsByFullName(ctx context.Context, fullName string
 // privilege on the parent catalog and the **USE_SCHEMA** privilege on the
 // parent schema, and either be the table owner or have the SELECT privilege on
 // the table.
-func (a *TablesPreviewAPI) GetByFullName(ctx context.Context, fullName string) (*TableInfo, error) {
-	return a.tablesPreviewImpl.Get(ctx, GetTableRequest{
+func (a *TablesAPI) GetByFullName(ctx context.Context, fullName string) (*TableInfo, error) {
+	return a.tablesImpl.Get(ctx, GetTableRequest{
 		FullName: fullName,
 	})
 }
 
-// TableInfoNameToTableIdMap calls [TablesPreviewAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value.
+// TableInfoNameToTableIdMap calls [TablesAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value.
 //
 // Returns an error if there's more than one [TableInfo] with the same .Name.
 //
 // Note: All [TableInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *TablesPreviewAPI) TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error) {
+func (a *TablesAPI) TableInfoNameToTableIdMap(ctx context.Context, request ListTablesRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -3009,14 +3009,14 @@ func (a *TablesPreviewAPI) TableInfoNameToTableIdMap(ctx context.Context, reques
 	return mapping, nil
 }
 
-// GetByName calls [TablesPreviewAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo].
+// GetByName calls [TablesAPI.TableInfoNameToTableIdMap] and returns a single [TableInfo].
 //
 // Returns an error if there's more than one [TableInfo] with the same .Name.
 //
 // Note: All [TableInfo] instances are loaded into memory before returning matching by name.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *TablesPreviewAPI) GetByName(ctx context.Context, name string) (*TableInfo, error) {
+func (a *TablesAPI) GetByName(ctx context.Context, name string) (*TableInfo, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
 	result, err := a.ListAll(ctx, ListTablesRequest{})
 	if err != nil {
@@ -3037,7 +3037,7 @@ func (a *TablesPreviewAPI) GetByName(ctx context.Context, name string) (*TableIn
 	return &alternatives[0], nil
 }
 
-type TemporaryTableCredentialsPreviewInterface interface {
+type TemporaryTableCredentialsInterface interface {
 
 	// Generate a temporary table credential.
 	//
@@ -3048,9 +3048,9 @@ type TemporaryTableCredentialsPreviewInterface interface {
 	GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error)
 }
 
-func NewTemporaryTableCredentialsPreview(client *client.DatabricksClient) *TemporaryTableCredentialsPreviewAPI {
-	return &TemporaryTableCredentialsPreviewAPI{
-		temporaryTableCredentialsPreviewImpl: temporaryTableCredentialsPreviewImpl{
+func NewTemporaryTableCredentials(client *client.DatabricksClient) *TemporaryTableCredentialsAPI {
+	return &TemporaryTableCredentialsAPI{
+		temporaryTableCredentialsImpl: temporaryTableCredentialsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -3072,11 +3072,11 @@ func NewTemporaryTableCredentialsPreview(client *client.DatabricksClient) *Tempo
 // SCHEMA is a schema level permission that can only be granted by catalog admin
 // explicitly and is not included in schema ownership or ALL PRIVILEGES on the
 // schema for security reasons.
-type TemporaryTableCredentialsPreviewAPI struct {
-	temporaryTableCredentialsPreviewImpl
+type TemporaryTableCredentialsAPI struct {
+	temporaryTableCredentialsImpl
 }
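A minimal sketch of the TablesAPI lookup helper above (hypothetical consumer code; import path and alias assumed):

package main // hypothetical consumer, not part of this patch

import (
	"context"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// tableID looks a table up by short name and returns its TableId. As the doc
// comment above notes, every table is loaded into memory first, so this suits
// small metastores or one-off scripts rather than hot paths.
func tableID(ctx context.Context, tables catalogpreview.TablesInterface, name string) (string, error) {
	info, err := tables.GetByName(ctx, name)
	if err != nil {
		return "", err
	}
	return info.TableId, nil
}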
 
-type VolumesPreviewInterface interface {
+type VolumesInterface interface {
 
 	// Create a Volume.
 	//
@@ -3154,7 +3154,7 @@ type VolumesPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error)
 
-	// VolumeInfoNameToVolumeIdMap calls [VolumesPreviewAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value.
+	// VolumeInfoNameToVolumeIdMap calls [VolumesAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value.
 	//
 	// Returns an error if there's more than one [VolumeInfo] with the same .Name.
 	//
@@ -3163,7 +3163,7 @@ type VolumesPreviewInterface interface {
 	// This method is generated by Databricks SDK Code Generator.
 	VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error)
 
-	// GetByName calls [VolumesPreviewAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo].
+	// GetByName calls [VolumesAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo].
 	//
 	// Returns an error if there's more than one [VolumeInfo] with the same .Name.
 	//
@@ -3206,9 +3206,9 @@ type VolumesPreviewInterface interface {
 	Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error)
 }
 
-func NewVolumesPreview(client *client.DatabricksClient) *VolumesPreviewAPI {
-	return &VolumesPreviewAPI{
-		volumesPreviewImpl: volumesPreviewImpl{
+func NewVolumes(client *client.DatabricksClient) *VolumesAPI {
+	return &VolumesAPI{
+		volumesImpl: volumesImpl{
 			client: client,
 		},
 	}
 }
 
@@ -3222,8 +3222,8 @@ func NewVolumesPreview(client *client.DatabricksClient) *VolumesPreviewAPI {
 // cluster machines, storing library and config files of arbitrary formats such
 // as .whl or .txt centrally and providing secure access across workspaces to
 // it, or transforming and querying non-tabular data files in ETL.
-type VolumesPreviewAPI struct {
-	volumesPreviewImpl
+type VolumesAPI struct {
+	volumesImpl
 }
 
 // Delete a Volume.
@@ -3234,20 +3234,20 @@ type VolumesPreviewAPI struct {
 // latter case, the caller must also be the owner or have the **USE_CATALOG**
 // privilege on the parent catalog and the **USE_SCHEMA** privilege on the
 // parent schema.
-func (a *VolumesPreviewAPI) DeleteByName(ctx context.Context, name string) error {
-	return a.volumesPreviewImpl.Delete(ctx, DeleteVolumeRequest{
+func (a *VolumesAPI) DeleteByName(ctx context.Context, name string) error {
+	return a.volumesImpl.Delete(ctx, DeleteVolumeRequest{
 		Name: name,
 	})
 }
 
-// VolumeInfoNameToVolumeIdMap calls [VolumesPreviewAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value.
+// VolumeInfoNameToVolumeIdMap calls [VolumesAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value.
 //
 // Returns an error if there's more than one [VolumeInfo] with the same .Name.
 //
 // Note: All [VolumeInfo] instances are loaded into memory before creating a map.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *VolumesPreviewAPI) VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error) {
+func (a *VolumesAPI) VolumeInfoNameToVolumeIdMap(ctx context.Context, request ListVolumesRequest) (map[string]string, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id")
 	mapping := map[string]string{}
 	result, err := a.ListAll(ctx, request)
@@ -3265,14 +3265,14 @@ func (a *VolumesPreviewAPI) VolumeInfoNameToVolumeIdMap(ctx context.Context, req
 	return mapping, nil
 }
 
-// GetByName calls [VolumesPreviewAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo].
+// GetByName calls [VolumesAPI.VolumeInfoNameToVolumeIdMap] and returns a single [VolumeInfo].
 //
 // Returns an error if there's more than one [VolumeInfo] with the same .Name.
 //
 // Note: All [VolumeInfo] instances are loaded into memory before returning matching by name.
 //
 // This method is generated by Databricks SDK Code Generator.
-func (a *VolumesPreviewAPI) GetByName(ctx context.Context, name string) (*VolumeInfo, error) {
+func (a *VolumesAPI) GetByName(ctx context.Context, name string) (*VolumeInfo, error) {
 	ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name")
 	result, err := a.ListAll(ctx, ListVolumesRequest{})
 	if err != nil {
@@ -3301,13 +3301,13 @@ func (a *VolumesPreviewAPI) GetByName(ctx context.Context, name string) (*Volume
 // VOLUME** privilege on) the volume. For the latter case, the caller must also
 // be the owner or have the **USE_CATALOG** privilege on the parent catalog and
 // the **USE_SCHEMA** privilege on the parent schema.
-func (a *VolumesPreviewAPI) ReadByName(ctx context.Context, name string) (*VolumeInfo, error) {
-	return a.volumesPreviewImpl.Read(ctx, ReadVolumeRequest{
+func (a *VolumesAPI) ReadByName(ctx context.Context, name string) (*VolumeInfo, error) {
+	return a.volumesImpl.Read(ctx, ReadVolumeRequest{
 		Name: name,
 	})
 }
 
-type WorkspaceBindingsPreviewInterface interface {
+type WorkspaceBindingsInterface interface {
 
 	// Get catalog workspace bindings.
 	//
@@ -3356,9 +3356,9 @@ type WorkspaceBindingsPreviewInterface interface {
 	UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error)
 }
 
-func NewWorkspaceBindingsPreview(client *client.DatabricksClient) *WorkspaceBindingsPreviewAPI {
-	return &WorkspaceBindingsPreviewAPI{
-		workspaceBindingsPreviewImpl: workspaceBindingsPreviewImpl{
+func NewWorkspaceBindings(client *client.DatabricksClient) *WorkspaceBindingsAPI {
+	return &WorkspaceBindingsAPI{
+		workspaceBindingsImpl: workspaceBindingsImpl{
 			client: client,
 		},
 	}
 }
 
@@ -3383,16 +3383,16 @@ func NewWorkspaceBindingsPreview(client *client.DatabricksClient) *WorkspaceBind
 //
 // Securable types that support binding: - catalog - storage_credential -
 // external_location
-type WorkspaceBindingsPreviewAPI struct {
-	workspaceBindingsPreviewImpl
+type WorkspaceBindingsAPI struct {
+	workspaceBindingsImpl
 }
 
 // Get catalog workspace bindings.
 //
 // Gets workspace bindings of the catalog. The caller must be a metastore admin
 // or an owner of the catalog.
-func (a *WorkspaceBindingsPreviewAPI) GetByName(ctx context.Context, name string) (*CurrentWorkspaceBindings, error) {
-	return a.workspaceBindingsPreviewImpl.Get(ctx, GetWorkspaceBindingRequest{
+func (a *WorkspaceBindingsAPI) GetByName(ctx context.Context, name string) (*CurrentWorkspaceBindings, error) {
+	return a.workspaceBindingsImpl.Get(ctx, GetWorkspaceBindingRequest{
 		Name: name,
 	})
 }
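A hedged sketch of the renamed WorkspaceBindingsAPI.GetByName wrapper above. The Workspaces field is assumed from CurrentWorkspaceBindings' REST shape; nothing below is part of the patch.

package main // hypothetical consumer, not part of this patch

import (
	"context"

	catalogpreview "github.com/databricks/databricks-sdk-go/catalog/v2preview"
)

// boundWorkspaces returns the workspace IDs a catalog is bound to. The caller
// must be a metastore admin or the catalog owner, per the docs above.
func boundWorkspaces(ctx context.Context, wb catalogpreview.WorkspaceBindingsInterface, catalogName string) ([]int64, error) {
	bindings, err := wb.GetByName(ctx, catalogName)
	if err != nil {
		return nil, err
	}
	return bindings.Workspaces, nil
}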
-func (a *WorkspaceBindingsPreviewAPI) GetBindingsBySecurableTypeAndSecurableName(ctx context.Context, securableType GetBindingsSecurableType, securableName string) (*WorkspaceBindingsResponse, error) { - return a.workspaceBindingsPreviewImpl.internalGetBindings(ctx, GetBindingsRequest{ +func (a *WorkspaceBindingsAPI) GetBindingsBySecurableTypeAndSecurableName(ctx context.Context, securableType GetBindingsSecurableType, securableName string) (*WorkspaceBindingsResponse, error) { + return a.workspaceBindingsImpl.internalGetBindings(ctx, GetBindingsRequest{ SecurableType: securableType, SecurableName: securableName, }) diff --git a/catalog/v2preview/client.go b/catalog/v2preview/client.go index e585fb838..0a959c183 100755 --- a/catalog/v2preview/client.go +++ b/catalog/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type AccountMetastoreAssignmentsPreviewClient struct { - AccountMetastoreAssignmentsPreviewInterface +type AccountMetastoreAssignmentsClient struct { + AccountMetastoreAssignmentsInterface Config *config.Config } -func NewAccountMetastoreAssignmentsPreviewClient(cfg *config.Config) (*AccountMetastoreAssignmentsPreviewClient, error) { +func NewAccountMetastoreAssignmentsClient(cfg *config.Config) (*AccountMetastoreAssignmentsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -34,19 +34,19 @@ func NewAccountMetastoreAssignmentsPreviewClient(cfg *config.Config) (*AccountMe return nil, err } - return &AccountMetastoreAssignmentsPreviewClient{ - Config: cfg, - AccountMetastoreAssignmentsPreviewInterface: NewAccountMetastoreAssignmentsPreview(apiClient), + return &AccountMetastoreAssignmentsClient{ + Config: cfg, + AccountMetastoreAssignmentsInterface: NewAccountMetastoreAssignments(apiClient), }, nil } -type AccountMetastoresPreviewClient struct { - AccountMetastoresPreviewInterface +type AccountMetastoresClient struct { + AccountMetastoresInterface Config *config.Config } -func NewAccountMetastoresPreviewClient(cfg *config.Config) (*AccountMetastoresPreviewClient, error) { +func NewAccountMetastoresClient(cfg *config.Config) (*AccountMetastoresClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -64,19 +64,19 @@ func NewAccountMetastoresPreviewClient(cfg *config.Config) (*AccountMetastoresPr return nil, err } - return &AccountMetastoresPreviewClient{ - Config: cfg, - AccountMetastoresPreviewInterface: NewAccountMetastoresPreview(apiClient), + return &AccountMetastoresClient{ + Config: cfg, + AccountMetastoresInterface: NewAccountMetastores(apiClient), }, nil } -type AccountStorageCredentialsPreviewClient struct { - AccountStorageCredentialsPreviewInterface +type AccountStorageCredentialsClient struct { + AccountStorageCredentialsInterface Config *config.Config } -func NewAccountStorageCredentialsPreviewClient(cfg *config.Config) (*AccountStorageCredentialsPreviewClient, error) { +func NewAccountStorageCredentialsClient(cfg *config.Config) (*AccountStorageCredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -94,19 +94,19 @@ func NewAccountStorageCredentialsPreviewClient(cfg *config.Config) (*AccountStor return nil, err } - return &AccountStorageCredentialsPreviewClient{ - Config: cfg, - AccountStorageCredentialsPreviewInterface: NewAccountStorageCredentialsPreview(apiClient), + return &AccountStorageCredentialsClient{ + Config: cfg, + AccountStorageCredentialsInterface: NewAccountStorageCredentials(apiClient), }, nil } -type ArtifactAllowlistsPreviewClient struct { - 
ArtifactAllowlistsPreviewInterface +type ArtifactAllowlistsClient struct { + ArtifactAllowlistsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewArtifactAllowlistsPreviewClient(cfg *config.Config) (*ArtifactAllowlistsPreviewClient, error) { +func NewArtifactAllowlistsClient(cfg *config.Config) (*ArtifactAllowlistsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -127,20 +127,20 @@ func NewArtifactAllowlistsPreviewClient(cfg *config.Config) (*ArtifactAllowlists return nil, err } - return &ArtifactAllowlistsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ArtifactAllowlistsPreviewInterface: NewArtifactAllowlistsPreview(databricksClient), + return &ArtifactAllowlistsClient{ + Config: cfg, + apiClient: apiClient, + ArtifactAllowlistsInterface: NewArtifactAllowlists(databricksClient), }, nil } -type CatalogsPreviewClient struct { - CatalogsPreviewInterface +type CatalogsClient struct { + CatalogsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCatalogsPreviewClient(cfg *config.Config) (*CatalogsPreviewClient, error) { +func NewCatalogsClient(cfg *config.Config) (*CatalogsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -161,20 +161,20 @@ func NewCatalogsPreviewClient(cfg *config.Config) (*CatalogsPreviewClient, error return nil, err } - return &CatalogsPreviewClient{ - Config: cfg, - apiClient: apiClient, - CatalogsPreviewInterface: NewCatalogsPreview(databricksClient), + return &CatalogsClient{ + Config: cfg, + apiClient: apiClient, + CatalogsInterface: NewCatalogs(databricksClient), }, nil } -type ConnectionsPreviewClient struct { - ConnectionsPreviewInterface +type ConnectionsClient struct { + ConnectionsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConnectionsPreviewClient(cfg *config.Config) (*ConnectionsPreviewClient, error) { +func NewConnectionsClient(cfg *config.Config) (*ConnectionsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -195,20 +195,20 @@ func NewConnectionsPreviewClient(cfg *config.Config) (*ConnectionsPreviewClient, return nil, err } - return &ConnectionsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConnectionsPreviewInterface: NewConnectionsPreview(databricksClient), + return &ConnectionsClient{ + Config: cfg, + apiClient: apiClient, + ConnectionsInterface: NewConnections(databricksClient), }, nil } -type CredentialsPreviewClient struct { - CredentialsPreviewInterface +type CredentialsClient struct { + CredentialsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, error) { +func NewCredentialsClient(cfg *config.Config) (*CredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -229,20 +229,20 @@ func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, return nil, err } - return &CredentialsPreviewClient{ - Config: cfg, - apiClient: apiClient, - CredentialsPreviewInterface: NewCredentialsPreview(databricksClient), + return &CredentialsClient{ + Config: cfg, + apiClient: apiClient, + CredentialsInterface: NewCredentials(databricksClient), }, nil } -type ExternalLocationsPreviewClient struct { - ExternalLocationsPreviewInterface +type ExternalLocationsClient struct { + ExternalLocationsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewExternalLocationsPreviewClient(cfg *config.Config) (*ExternalLocationsPreviewClient, error) { +func NewExternalLocationsClient(cfg *config.Config) 
(*ExternalLocationsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -263,20 +263,20 @@ func NewExternalLocationsPreviewClient(cfg *config.Config) (*ExternalLocationsPr return nil, err } - return &ExternalLocationsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ExternalLocationsPreviewInterface: NewExternalLocationsPreview(databricksClient), + return &ExternalLocationsClient{ + Config: cfg, + apiClient: apiClient, + ExternalLocationsInterface: NewExternalLocations(databricksClient), }, nil } -type FunctionsPreviewClient struct { - FunctionsPreviewInterface +type FunctionsClient struct { + FunctionsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewFunctionsPreviewClient(cfg *config.Config) (*FunctionsPreviewClient, error) { +func NewFunctionsClient(cfg *config.Config) (*FunctionsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -297,20 +297,20 @@ func NewFunctionsPreviewClient(cfg *config.Config) (*FunctionsPreviewClient, err return nil, err } - return &FunctionsPreviewClient{ - Config: cfg, - apiClient: apiClient, - FunctionsPreviewInterface: NewFunctionsPreview(databricksClient), + return &FunctionsClient{ + Config: cfg, + apiClient: apiClient, + FunctionsInterface: NewFunctions(databricksClient), }, nil } -type GrantsPreviewClient struct { - GrantsPreviewInterface +type GrantsClient struct { + GrantsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewGrantsPreviewClient(cfg *config.Config) (*GrantsPreviewClient, error) { +func NewGrantsClient(cfg *config.Config) (*GrantsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -331,20 +331,20 @@ func NewGrantsPreviewClient(cfg *config.Config) (*GrantsPreviewClient, error) { return nil, err } - return &GrantsPreviewClient{ - Config: cfg, - apiClient: apiClient, - GrantsPreviewInterface: NewGrantsPreview(databricksClient), + return &GrantsClient{ + Config: cfg, + apiClient: apiClient, + GrantsInterface: NewGrants(databricksClient), }, nil } -type MetastoresPreviewClient struct { - MetastoresPreviewInterface +type MetastoresClient struct { + MetastoresInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewMetastoresPreviewClient(cfg *config.Config) (*MetastoresPreviewClient, error) { +func NewMetastoresClient(cfg *config.Config) (*MetastoresClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -365,20 +365,20 @@ func NewMetastoresPreviewClient(cfg *config.Config) (*MetastoresPreviewClient, e return nil, err } - return &MetastoresPreviewClient{ - Config: cfg, - apiClient: apiClient, - MetastoresPreviewInterface: NewMetastoresPreview(databricksClient), + return &MetastoresClient{ + Config: cfg, + apiClient: apiClient, + MetastoresInterface: NewMetastores(databricksClient), }, nil } -type ModelVersionsPreviewClient struct { - ModelVersionsPreviewInterface +type ModelVersionsClient struct { + ModelVersionsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewModelVersionsPreviewClient(cfg *config.Config) (*ModelVersionsPreviewClient, error) { +func NewModelVersionsClient(cfg *config.Config) (*ModelVersionsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -399,20 +399,20 @@ func NewModelVersionsPreviewClient(cfg *config.Config) (*ModelVersionsPreviewCli return nil, err } - return &ModelVersionsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ModelVersionsPreviewInterface: NewModelVersionsPreview(databricksClient), + return &ModelVersionsClient{ + Config: cfg, + apiClient: apiClient, + 
ModelVersionsInterface: NewModelVersions(databricksClient), }, nil } -type OnlineTablesPreviewClient struct { - OnlineTablesPreviewInterface +type OnlineTablesClient struct { + OnlineTablesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewOnlineTablesPreviewClient(cfg *config.Config) (*OnlineTablesPreviewClient, error) { +func NewOnlineTablesClient(cfg *config.Config) (*OnlineTablesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -433,20 +433,20 @@ func NewOnlineTablesPreviewClient(cfg *config.Config) (*OnlineTablesPreviewClien return nil, err } - return &OnlineTablesPreviewClient{ - Config: cfg, - apiClient: apiClient, - OnlineTablesPreviewInterface: NewOnlineTablesPreview(databricksClient), + return &OnlineTablesClient{ + Config: cfg, + apiClient: apiClient, + OnlineTablesInterface: NewOnlineTables(databricksClient), }, nil } -type QualityMonitorsPreviewClient struct { - QualityMonitorsPreviewInterface +type QualityMonitorsClient struct { + QualityMonitorsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQualityMonitorsPreviewClient(cfg *config.Config) (*QualityMonitorsPreviewClient, error) { +func NewQualityMonitorsClient(cfg *config.Config) (*QualityMonitorsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -467,20 +467,20 @@ func NewQualityMonitorsPreviewClient(cfg *config.Config) (*QualityMonitorsPrevie return nil, err } - return &QualityMonitorsPreviewClient{ - Config: cfg, - apiClient: apiClient, - QualityMonitorsPreviewInterface: NewQualityMonitorsPreview(databricksClient), + return &QualityMonitorsClient{ + Config: cfg, + apiClient: apiClient, + QualityMonitorsInterface: NewQualityMonitors(databricksClient), }, nil } -type RegisteredModelsPreviewClient struct { - RegisteredModelsPreviewInterface +type RegisteredModelsClient struct { + RegisteredModelsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewRegisteredModelsPreviewClient(cfg *config.Config) (*RegisteredModelsPreviewClient, error) { +func NewRegisteredModelsClient(cfg *config.Config) (*RegisteredModelsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -501,20 +501,20 @@ func NewRegisteredModelsPreviewClient(cfg *config.Config) (*RegisteredModelsPrev return nil, err } - return &RegisteredModelsPreviewClient{ - Config: cfg, - apiClient: apiClient, - RegisteredModelsPreviewInterface: NewRegisteredModelsPreview(databricksClient), + return &RegisteredModelsClient{ + Config: cfg, + apiClient: apiClient, + RegisteredModelsInterface: NewRegisteredModels(databricksClient), }, nil } -type ResourceQuotasPreviewClient struct { - ResourceQuotasPreviewInterface +type ResourceQuotasClient struct { + ResourceQuotasInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewResourceQuotasPreviewClient(cfg *config.Config) (*ResourceQuotasPreviewClient, error) { +func NewResourceQuotasClient(cfg *config.Config) (*ResourceQuotasClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -535,20 +535,20 @@ func NewResourceQuotasPreviewClient(cfg *config.Config) (*ResourceQuotasPreviewC return nil, err } - return &ResourceQuotasPreviewClient{ - Config: cfg, - apiClient: apiClient, - ResourceQuotasPreviewInterface: NewResourceQuotasPreview(databricksClient), + return &ResourceQuotasClient{ + Config: cfg, + apiClient: apiClient, + ResourceQuotasInterface: NewResourceQuotas(databricksClient), }, nil } -type SchemasPreviewClient struct { - SchemasPreviewInterface +type SchemasClient struct { + SchemasInterface Config 
*config.Config apiClient *httpclient.ApiClient } -func NewSchemasPreviewClient(cfg *config.Config) (*SchemasPreviewClient, error) { +func NewSchemasClient(cfg *config.Config) (*SchemasClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -569,20 +569,20 @@ func NewSchemasPreviewClient(cfg *config.Config) (*SchemasPreviewClient, error) return nil, err } - return &SchemasPreviewClient{ - Config: cfg, - apiClient: apiClient, - SchemasPreviewInterface: NewSchemasPreview(databricksClient), + return &SchemasClient{ + Config: cfg, + apiClient: apiClient, + SchemasInterface: NewSchemas(databricksClient), }, nil } -type StorageCredentialsPreviewClient struct { - StorageCredentialsPreviewInterface +type StorageCredentialsClient struct { + StorageCredentialsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewStorageCredentialsPreviewClient(cfg *config.Config) (*StorageCredentialsPreviewClient, error) { +func NewStorageCredentialsClient(cfg *config.Config) (*StorageCredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -603,20 +603,20 @@ func NewStorageCredentialsPreviewClient(cfg *config.Config) (*StorageCredentials return nil, err } - return &StorageCredentialsPreviewClient{ - Config: cfg, - apiClient: apiClient, - StorageCredentialsPreviewInterface: NewStorageCredentialsPreview(databricksClient), + return &StorageCredentialsClient{ + Config: cfg, + apiClient: apiClient, + StorageCredentialsInterface: NewStorageCredentials(databricksClient), }, nil } -type SystemSchemasPreviewClient struct { - SystemSchemasPreviewInterface +type SystemSchemasClient struct { + SystemSchemasInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewSystemSchemasPreviewClient(cfg *config.Config) (*SystemSchemasPreviewClient, error) { +func NewSystemSchemasClient(cfg *config.Config) (*SystemSchemasClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -637,20 +637,20 @@ func NewSystemSchemasPreviewClient(cfg *config.Config) (*SystemSchemasPreviewCli return nil, err } - return &SystemSchemasPreviewClient{ - Config: cfg, - apiClient: apiClient, - SystemSchemasPreviewInterface: NewSystemSchemasPreview(databricksClient), + return &SystemSchemasClient{ + Config: cfg, + apiClient: apiClient, + SystemSchemasInterface: NewSystemSchemas(databricksClient), }, nil } -type TableConstraintsPreviewClient struct { - TableConstraintsPreviewInterface +type TableConstraintsClient struct { + TableConstraintsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewTableConstraintsPreviewClient(cfg *config.Config) (*TableConstraintsPreviewClient, error) { +func NewTableConstraintsClient(cfg *config.Config) (*TableConstraintsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -671,20 +671,20 @@ func NewTableConstraintsPreviewClient(cfg *config.Config) (*TableConstraintsPrev return nil, err } - return &TableConstraintsPreviewClient{ - Config: cfg, - apiClient: apiClient, - TableConstraintsPreviewInterface: NewTableConstraintsPreview(databricksClient), + return &TableConstraintsClient{ + Config: cfg, + apiClient: apiClient, + TableConstraintsInterface: NewTableConstraints(databricksClient), }, nil } -type TablesPreviewClient struct { - TablesPreviewInterface +type TablesClient struct { + TablesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewTablesPreviewClient(cfg *config.Config) (*TablesPreviewClient, error) { +func NewTablesClient(cfg *config.Config) (*TablesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ 
-705,20 +705,20 @@ func NewTablesPreviewClient(cfg *config.Config) (*TablesPreviewClient, error) { return nil, err } - return &TablesPreviewClient{ - Config: cfg, - apiClient: apiClient, - TablesPreviewInterface: NewTablesPreview(databricksClient), + return &TablesClient{ + Config: cfg, + apiClient: apiClient, + TablesInterface: NewTables(databricksClient), }, nil } -type TemporaryTableCredentialsPreviewClient struct { - TemporaryTableCredentialsPreviewInterface +type TemporaryTableCredentialsClient struct { + TemporaryTableCredentialsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewTemporaryTableCredentialsPreviewClient(cfg *config.Config) (*TemporaryTableCredentialsPreviewClient, error) { +func NewTemporaryTableCredentialsClient(cfg *config.Config) (*TemporaryTableCredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -739,20 +739,20 @@ func NewTemporaryTableCredentialsPreviewClient(cfg *config.Config) (*TemporaryTa return nil, err } - return &TemporaryTableCredentialsPreviewClient{ - Config: cfg, - apiClient: apiClient, - TemporaryTableCredentialsPreviewInterface: NewTemporaryTableCredentialsPreview(databricksClient), + return &TemporaryTableCredentialsClient{ + Config: cfg, + apiClient: apiClient, + TemporaryTableCredentialsInterface: NewTemporaryTableCredentials(databricksClient), }, nil } -type VolumesPreviewClient struct { - VolumesPreviewInterface +type VolumesClient struct { + VolumesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewVolumesPreviewClient(cfg *config.Config) (*VolumesPreviewClient, error) { +func NewVolumesClient(cfg *config.Config) (*VolumesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -773,20 +773,20 @@ func NewVolumesPreviewClient(cfg *config.Config) (*VolumesPreviewClient, error) return nil, err } - return &VolumesPreviewClient{ - Config: cfg, - apiClient: apiClient, - VolumesPreviewInterface: NewVolumesPreview(databricksClient), + return &VolumesClient{ + Config: cfg, + apiClient: apiClient, + VolumesInterface: NewVolumes(databricksClient), }, nil } -type WorkspaceBindingsPreviewClient struct { - WorkspaceBindingsPreviewInterface +type WorkspaceBindingsClient struct { + WorkspaceBindingsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewWorkspaceBindingsPreviewClient(cfg *config.Config) (*WorkspaceBindingsPreviewClient, error) { +func NewWorkspaceBindingsClient(cfg *config.Config) (*WorkspaceBindingsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -807,9 +807,9 @@ func NewWorkspaceBindingsPreviewClient(cfg *config.Config) (*WorkspaceBindingsPr return nil, err } - return &WorkspaceBindingsPreviewClient{ - Config: cfg, - apiClient: apiClient, - WorkspaceBindingsPreviewInterface: NewWorkspaceBindingsPreview(databricksClient), + return &WorkspaceBindingsClient{ + Config: cfg, + apiClient: apiClient, + WorkspaceBindingsInterface: NewWorkspaceBindings(databricksClient), }, nil } diff --git a/catalog/v2preview/impl.go b/catalog/v2preview/impl.go index e8ff2584b..0f98aadbc 100755 --- a/catalog/v2preview/impl.go +++ b/catalog/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just AccountMetastoreAssignmentsPreview API methods -type accountMetastoreAssignmentsPreviewImpl struct { +// unexported type that holds implementations of just AccountMetastoreAssignments API methods +type accountMetastoreAssignmentsImpl struct { client 
*client.DatabricksClient } -func (a *accountMetastoreAssignmentsPreviewImpl) Create(ctx context.Context, request AccountsCreateMetastoreAssignment) error { +func (a *accountMetastoreAssignmentsImpl) Create(ctx context.Context, request AccountsCreateMetastoreAssignment) error { var createResponse CreateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *accountMetastoreAssignmentsPreviewImpl) Create(ctx context.Context, req return err } -func (a *accountMetastoreAssignmentsPreviewImpl) Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error { +func (a *accountMetastoreAssignmentsImpl) Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *accountMetastoreAssignmentsPreviewImpl) Delete(ctx context.Context, req return err } -func (a *accountMetastoreAssignmentsPreviewImpl) Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error) { +func (a *accountMetastoreAssignmentsImpl) Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error) { var accountsMetastoreAssignment AccountsMetastoreAssignment path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastore", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) @@ -52,7 +52,7 @@ func (a *accountMetastoreAssignmentsPreviewImpl) Get(ctx context.Context, reques // // Gets a list of all Databricks workspace IDs that have been assigned to the given // metastore. -func (a *accountMetastoreAssignmentsPreviewImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64] { +func (a *accountMetastoreAssignmentsImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64] { getNextPage := func(ctx context.Context, req ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -74,11 +74,11 @@ func (a *accountMetastoreAssignmentsPreviewImpl) List(ctx context.Context, reque // // Gets a list of all Databricks workspace IDs that have been assigned to the given // metastore.
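Account-level services like this one need an account host and ID. A sketch, assuming the Host/AccountID field names carry over from the SDK's config package (they are not shown in this patch) and using placeholder values:

func listAssignedWorkspaces(ctx context.Context) {
	cfg := &config.Config{
		Host:      "https://accounts.cloud.databricks.com", // placeholder
		AccountID: "<account-id>",                          // placeholder
	}
	ama, err := catalog.NewAccountMetastoreAssignmentsClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// ListAll drains the iterator shown above into a []int64 of workspace IDs.
	ids, err := ama.ListAll(ctx, catalog.ListAccountMetastoreAssignmentsRequest{
		MetastoreId: "<metastore-id>",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(ids), "workspaces assigned")
}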
-func (a *accountMetastoreAssignmentsPreviewImpl) ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error) { +func (a *accountMetastoreAssignmentsImpl) ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error) { iterator := a.List(ctx, request) return listing.ToSlice[int64](ctx, iterator) } -func (a *accountMetastoreAssignmentsPreviewImpl) internalList(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { +func (a *accountMetastoreAssignmentsImpl) internalList(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { var listAccountMetastoreAssignmentsResponse ListAccountMetastoreAssignmentsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/workspaces", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -88,7 +88,7 @@ func (a *accountMetastoreAssignmentsPreviewImpl) internalList(ctx context.Contex return &listAccountMetastoreAssignmentsResponse, err } -func (a *accountMetastoreAssignmentsPreviewImpl) Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error { +func (a *accountMetastoreAssignmentsImpl) Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) queryParams := make(map[string]any) @@ -99,12 +99,12 @@ func (a *accountMetastoreAssignmentsPreviewImpl) Update(ctx context.Context, req return err } -// unexported type that holds implementations of just AccountMetastoresPreview API methods -type accountMetastoresPreviewImpl struct { +// unexported type that holds implementations of just AccountMetastores API methods +type accountMetastoresImpl struct { client *client.DatabricksClient } -func (a *accountMetastoresPreviewImpl) Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error) { +func (a *accountMetastoresImpl) Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -115,7 +115,7 @@ func (a *accountMetastoresPreviewImpl) Create(ctx context.Context, request Accou return &accountsMetastoreInfo, err } -func (a *accountMetastoresPreviewImpl) Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error { +func (a *accountMetastoresImpl) Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -125,7 +125,7 @@ func (a *accountMetastoresPreviewImpl) Delete(ctx context.Context, request Delet return err } -func (a *accountMetastoresPreviewImpl) Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error) { +func (a *accountMetastoresImpl) Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams 
:= make(map[string]any) @@ -138,7 +138,7 @@ func (a *accountMetastoresPreviewImpl) Get(ctx context.Context, request GetAccou // Get all metastores associated with an account. // // Gets all Unity Catalog metastores associated with an account specified by ID. -func (a *accountMetastoresPreviewImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { +func (a *accountMetastoresImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { @@ -160,11 +160,11 @@ func (a *accountMetastoresPreviewImpl) List(ctx context.Context) listing.Iterato // Get all metastores associated with an account. // // Gets all Unity Catalog metastores associated with an account specified by ID. -func (a *accountMetastoresPreviewImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { +func (a *accountMetastoresImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { iterator := a.List(ctx) return listing.ToSlice[MetastoreInfo](ctx, iterator) } -func (a *accountMetastoresPreviewImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { +func (a *accountMetastoresImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores", a.client.ConfiguredAccountID()) @@ -174,7 +174,7 @@ func (a *accountMetastoresPreviewImpl) internalList(ctx context.Context) (*ListM return &listMetastoresResponse, err } -func (a *accountMetastoresPreviewImpl) Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error) { +func (a *accountMetastoresImpl) Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -185,12 +185,12 @@ func (a *accountMetastoresPreviewImpl) Update(ctx context.Context, request Accou return &accountsMetastoreInfo, err } -// unexported type that holds implementations of just AccountStorageCredentialsPreview API methods -type accountStorageCredentialsPreviewImpl struct { +// unexported type that holds implementations of just AccountStorageCredentials API methods +type accountStorageCredentialsImpl struct { client *client.DatabricksClient } -func (a *accountStorageCredentialsPreviewImpl) Create(ctx context.Context, request AccountsCreateStorageCredential) (*AccountsStorageCredentialInfo, error) { +func (a *accountStorageCredentialsImpl) Create(ctx context.Context, request AccountsCreateStorageCredential) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -201,7 +201,7 @@ func (a *accountStorageCredentialsPreviewImpl) Create(ctx context.Context, reque return &accountsStorageCredentialInfo, err } -func (a *accountStorageCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error { +func (a *accountStorageCredentialsImpl) Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error { var deleteResponse DeleteResponse path := 
fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) queryParams := make(map[string]any) @@ -211,7 +211,7 @@ func (a *accountStorageCredentialsPreviewImpl) Delete(ctx context.Context, reque return err } -func (a *accountStorageCredentialsPreviewImpl) Get(ctx context.Context, request GetAccountStorageCredentialRequest) (*AccountsStorageCredentialInfo, error) { +func (a *accountStorageCredentialsImpl) Get(ctx context.Context, request GetAccountStorageCredentialRequest) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) queryParams := make(map[string]any) @@ -225,7 +225,7 @@ func (a *accountStorageCredentialsPreviewImpl) Get(ctx context.Context, request // // Gets a list of all storage credentials that have been assigned to the given // metastore. -func (a *accountStorageCredentialsPreviewImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { +func (a *accountStorageCredentialsImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { getNextPage := func(ctx context.Context, req ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -247,11 +247,11 @@ func (a *accountStorageCredentialsPreviewImpl) List(ctx context.Context, request // // Gets a list of all storage credentials that have been assigned to the given // metastore.
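The generated List methods return a lazy listing.Iterator that pulls pages through internalList as needed. A consumption sketch, assuming the iterator's HasNext/Next contract from the SDK's listing package and a client (asc) built like the account-level client above:

func printStorageCredentials(ctx context.Context, asc *catalog.AccountStorageCredentialsClient) {
	it := asc.List(ctx, catalog.ListAccountStorageCredentialsRequest{
		MetastoreId: "<metastore-id>", // placeholder
	})
	for it.HasNext(ctx) {
		cred, err := it.Next(ctx) // fetches the next page on demand
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cred.Name)
	}
}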
-func (a *accountStorageCredentialsPreviewImpl) ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error) { +func (a *accountStorageCredentialsImpl) ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[StorageCredentialInfo](ctx, iterator) } -func (a *accountStorageCredentialsPreviewImpl) internalList(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { +func (a *accountStorageCredentialsImpl) internalList(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { var listAccountStorageCredentialsResponse ListAccountStorageCredentialsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -261,7 +261,7 @@ func (a *accountStorageCredentialsPreviewImpl) internalList(ctx context.Context, return &listAccountStorageCredentialsResponse, err } -func (a *accountStorageCredentialsPreviewImpl) Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error) { +func (a *accountStorageCredentialsImpl) Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0preview/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) queryParams := make(map[string]any) @@ -272,12 +272,12 @@ func (a *accountStorageCredentialsPreviewImpl) Update(ctx context.Context, reque return &accountsStorageCredentialInfo, err } -// unexported type that holds implementations of just ArtifactAllowlistsPreview API methods -type artifactAllowlistsPreviewImpl struct { +// unexported type that holds implementations of just ArtifactAllowlists API methods +type artifactAllowlistsImpl struct { client *client.DatabricksClient } -func (a *artifactAllowlistsPreviewImpl) Get(ctx context.Context, request GetArtifactAllowlistRequest) (*ArtifactAllowlistInfo, error) { +func (a *artifactAllowlistsImpl) Get(ctx context.Context, request GetArtifactAllowlistRequest) (*ArtifactAllowlistInfo, error) { var artifactAllowlistInfo ArtifactAllowlistInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/artifact-allowlists/%v", request.ArtifactType) queryParams := make(map[string]any) @@ -287,7 +287,7 @@ func (a *artifactAllowlistsPreviewImpl) Get(ctx context.Context, request GetArti return &artifactAllowlistInfo, err } -func (a *artifactAllowlistsPreviewImpl) Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error) { +func (a *artifactAllowlistsImpl) Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error) { var artifactAllowlistInfo ArtifactAllowlistInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/artifact-allowlists/%v", request.ArtifactType) queryParams := make(map[string]any) @@ -298,12 +298,12 @@ func (a *artifactAllowlistsPreviewImpl) Update(ctx context.Context, request SetA return &artifactAllowlistInfo, err } -// unexported type that holds implementations of just CatalogsPreview API methods -type catalogsPreviewImpl struct { +// unexported type that holds implementations of just Catalogs API methods 
+type catalogsImpl struct { client *client.DatabricksClient } -func (a *catalogsPreviewImpl) Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error) { +func (a *catalogsImpl) Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := "/api/2.1preview/unity-catalog/catalogs" queryParams := make(map[string]any) @@ -314,7 +314,7 @@ func (a *catalogsPreviewImpl) Create(ctx context.Context, request CreateCatalog) return &catalogInfo, err } -func (a *catalogsPreviewImpl) Delete(ctx context.Context, request DeleteCatalogRequest) error { +func (a *catalogsImpl) Delete(ctx context.Context, request DeleteCatalogRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) queryParams := make(map[string]any) @@ -324,7 +324,7 @@ func (a *catalogsPreviewImpl) Delete(ctx context.Context, request DeleteCatalogR return err } -func (a *catalogsPreviewImpl) Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error) { +func (a *catalogsImpl) Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) queryParams := make(map[string]any) @@ -341,7 +341,7 @@ func (a *catalogsPreviewImpl) Get(ctx context.Context, request GetCatalogRequest // caller (or for which the caller has the **USE_CATALOG** privilege) will be // retrieved. There is no guarantee of a specific ordering of the elements in // the array. -func (a *catalogsPreviewImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { +func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -372,11 +372,11 @@ func (a *catalogsPreviewImpl) List(ctx context.Context, request ListCatalogsRequ // caller (or for which the caller has the **USE_CATALOG** privilege) will be // retrieved. There is no guarantee of a specific ordering of the elements in // the array. 
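Workspace-level clients are simpler to construct than account-level ones. A sketch of fetching a single catalog through the impl's Get shown above ("main" is a placeholder; the Owner field is assumed from the stable catalog model):

func showCatalog(ctx context.Context) {
	cats, err := catalog.NewCatalogsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Resolves to GET /api/2.1preview/unity-catalog/catalogs/{name}.
	info, err := cats.Get(ctx, catalog.GetCatalogRequest{Name: "main"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.Name, info.Owner)
}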
-func (a *catalogsPreviewImpl) ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error) { +func (a *catalogsImpl) ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[CatalogInfo](ctx, iterator) } -func (a *catalogsPreviewImpl) internalList(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { +func (a *catalogsImpl) internalList(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { var listCatalogsResponse ListCatalogsResponse path := "/api/2.1preview/unity-catalog/catalogs" queryParams := make(map[string]any) @@ -386,7 +386,7 @@ func (a *catalogsPreviewImpl) internalList(ctx context.Context, request ListCata return &listCatalogsResponse, err } -func (a *catalogsPreviewImpl) Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error) { +func (a *catalogsImpl) Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/catalogs/%v", request.Name) queryParams := make(map[string]any) @@ -397,12 +397,12 @@ func (a *catalogsPreviewImpl) Update(ctx context.Context, request UpdateCatalog) return &catalogInfo, err } -// unexported type that holds implementations of just ConnectionsPreview API methods -type connectionsPreviewImpl struct { +// unexported type that holds implementations of just Connections API methods +type connectionsImpl struct { client *client.DatabricksClient } -func (a *connectionsPreviewImpl) Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error) { +func (a *connectionsImpl) Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := "/api/2.1preview/unity-catalog/connections" queryParams := make(map[string]any) @@ -413,7 +413,7 @@ func (a *connectionsPreviewImpl) Create(ctx context.Context, request CreateConne return &connectionInfo, err } -func (a *connectionsPreviewImpl) Delete(ctx context.Context, request DeleteConnectionRequest) error { +func (a *connectionsImpl) Delete(ctx context.Context, request DeleteConnectionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) queryParams := make(map[string]any) @@ -423,7 +423,7 @@ func (a *connectionsPreviewImpl) Delete(ctx context.Context, request DeleteConne return err } -func (a *connectionsPreviewImpl) Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error) { +func (a *connectionsImpl) Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) queryParams := make(map[string]any) @@ -436,7 +436,7 @@ func (a *connectionsPreviewImpl) Get(ctx context.Context, request GetConnectionR // List connections. // // List all connections. 
-func (a *connectionsPreviewImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { +func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -463,11 +463,11 @@ func (a *connectionsPreviewImpl) List(ctx context.Context, request ListConnectio // List connections. // // List all connections. -func (a *connectionsPreviewImpl) ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) { +func (a *connectionsImpl) ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ConnectionInfo](ctx, iterator) } -func (a *connectionsPreviewImpl) internalList(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { +func (a *connectionsImpl) internalList(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { var listConnectionsResponse ListConnectionsResponse path := "/api/2.1preview/unity-catalog/connections" queryParams := make(map[string]any) @@ -477,7 +477,7 @@ func (a *connectionsPreviewImpl) internalList(ctx context.Context, request ListC return &listConnectionsResponse, err } -func (a *connectionsPreviewImpl) Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error) { +func (a *connectionsImpl) Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/connections/%v", request.Name) queryParams := make(map[string]any) @@ -488,12 +488,12 @@ func (a *connectionsPreviewImpl) Update(ctx context.Context, request UpdateConne return &connectionInfo, err } -// unexported type that holds implementations of just CredentialsPreview API methods -type credentialsPreviewImpl struct { +// unexported type that holds implementations of just Credentials API methods +type credentialsImpl struct { client *client.DatabricksClient } -func (a *credentialsPreviewImpl) CreateCredential(ctx context.Context, request CreateCredentialRequest) (*CredentialInfo, error) { +func (a *credentialsImpl) CreateCredential(ctx context.Context, request CreateCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := "/api/2.1preview/unity-catalog/credentials" queryParams := make(map[string]any) @@ -504,7 +504,7 @@ func (a *credentialsPreviewImpl) CreateCredential(ctx context.Context, request C return &credentialInfo, err } -func (a *credentialsPreviewImpl) DeleteCredential(ctx context.Context, request DeleteCredentialRequest) error { +func (a *credentialsImpl) DeleteCredential(ctx context.Context, request DeleteCredentialRequest) error { var deleteCredentialResponse DeleteCredentialResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) queryParams := make(map[string]any) @@ -514,7 +514,7 @@ func (a *credentialsPreviewImpl) DeleteCredential(ctx context.Context, request D return err } -func (a *credentialsPreviewImpl) GenerateTemporaryServiceCredential(ctx context.Context, request GenerateTemporaryServiceCredentialRequest) (*TemporaryCredentials, error) { +func (a *credentialsImpl) GenerateTemporaryServiceCredential(ctx context.Context, request GenerateTemporaryServiceCredentialRequest) 
(*TemporaryCredentials, error) { var temporaryCredentials TemporaryCredentials path := "/api/2.1preview/unity-catalog/temporary-service-credentials" queryParams := make(map[string]any) @@ -525,7 +525,7 @@ func (a *credentialsPreviewImpl) GenerateTemporaryServiceCredential(ctx context. return &temporaryCredentials, err } -func (a *credentialsPreviewImpl) GetCredential(ctx context.Context, request GetCredentialRequest) (*CredentialInfo, error) { +func (a *credentialsImpl) GetCredential(ctx context.Context, request GetCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) queryParams := make(map[string]any) @@ -543,7 +543,7 @@ func (a *credentialsPreviewImpl) GetCredential(ctx context.Context, request GetC // to access. If the caller is a metastore admin, retrieval of credentials is // unrestricted. There is no guarantee of a specific ordering of the elements in // the array. -func (a *credentialsPreviewImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo] { +func (a *credentialsImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo] { getNextPage := func(ctx context.Context, req ListCredentialsRequest) (*ListCredentialsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -575,11 +575,11 @@ func (a *credentialsPreviewImpl) ListCredentials(ctx context.Context, request Li // to access. If the caller is a metastore admin, retrieval of credentials is // unrestricted. There is no guarantee of a specific ordering of the elements in // the array. -func (a *credentialsPreviewImpl) ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error) { +func (a *credentialsImpl) ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error) { iterator := a.ListCredentials(ctx, request) return listing.ToSlice[CredentialInfo](ctx, iterator) } -func (a *credentialsPreviewImpl) internalListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { +func (a *credentialsImpl) internalListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.1preview/unity-catalog/credentials" queryParams := make(map[string]any) @@ -589,7 +589,7 @@ func (a *credentialsPreviewImpl) internalListCredentials(ctx context.Context, re return &listCredentialsResponse, err } -func (a *credentialsPreviewImpl) UpdateCredential(ctx context.Context, request UpdateCredentialRequest) (*CredentialInfo, error) { +func (a *credentialsImpl) UpdateCredential(ctx context.Context, request UpdateCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/credentials/%v", request.NameArg) queryParams := make(map[string]any) @@ -600,7 +600,7 @@ func (a *credentialsPreviewImpl) UpdateCredential(ctx context.Context, request U return &credentialInfo, err } -func (a *credentialsPreviewImpl) ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error) { +func (a *credentialsImpl) ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error) { var validateCredentialResponse ValidateCredentialResponse path := 
"/api/2.1preview/unity-catalog/validate-credentials" queryParams := make(map[string]any) @@ -611,12 +611,12 @@ func (a *credentialsPreviewImpl) ValidateCredential(ctx context.Context, request return &validateCredentialResponse, err } -// unexported type that holds implementations of just ExternalLocationsPreview API methods -type externalLocationsPreviewImpl struct { +// unexported type that holds implementations of just ExternalLocations API methods +type externalLocationsImpl struct { client *client.DatabricksClient } -func (a *externalLocationsPreviewImpl) Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error) { +func (a *externalLocationsImpl) Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := "/api/2.1preview/unity-catalog/external-locations" queryParams := make(map[string]any) @@ -627,7 +627,7 @@ func (a *externalLocationsPreviewImpl) Create(ctx context.Context, request Creat return &externalLocationInfo, err } -func (a *externalLocationsPreviewImpl) Delete(ctx context.Context, request DeleteExternalLocationRequest) error { +func (a *externalLocationsImpl) Delete(ctx context.Context, request DeleteExternalLocationRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) queryParams := make(map[string]any) @@ -637,7 +637,7 @@ func (a *externalLocationsPreviewImpl) Delete(ctx context.Context, request Delet return err } -func (a *externalLocationsPreviewImpl) Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error) { +func (a *externalLocationsImpl) Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) queryParams := make(map[string]any) @@ -654,7 +654,7 @@ func (a *externalLocationsPreviewImpl) Get(ctx context.Context, request GetExter // external location, or a user that has some privilege on the external // location. There is no guarantee of a specific ordering of the elements in the // array. -func (a *externalLocationsPreviewImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { +func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -685,11 +685,11 @@ func (a *externalLocationsPreviewImpl) List(ctx context.Context, request ListExt // external location, or a user that has some privilege on the external // location. There is no guarantee of a specific ordering of the elements in the // array. 
-func (a *externalLocationsPreviewImpl) ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) { +func (a *externalLocationsImpl) ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ExternalLocationInfo](ctx, iterator) } -func (a *externalLocationsPreviewImpl) internalList(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { +func (a *externalLocationsImpl) internalList(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { var listExternalLocationsResponse ListExternalLocationsResponse path := "/api/2.1preview/unity-catalog/external-locations" queryParams := make(map[string]any) @@ -699,7 +699,7 @@ func (a *externalLocationsPreviewImpl) internalList(ctx context.Context, request return &listExternalLocationsResponse, err } -func (a *externalLocationsPreviewImpl) Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) { +func (a *externalLocationsImpl) Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/external-locations/%v", request.Name) queryParams := make(map[string]any) @@ -710,12 +710,12 @@ func (a *externalLocationsPreviewImpl) Update(ctx context.Context, request Updat return &externalLocationInfo, err } -// unexported type that holds implementations of just FunctionsPreview API methods -type functionsPreviewImpl struct { +// unexported type that holds implementations of just Functions API methods +type functionsImpl struct { client *client.DatabricksClient } -func (a *functionsPreviewImpl) Create(ctx context.Context, request CreateFunctionRequest) (*FunctionInfo, error) { +func (a *functionsImpl) Create(ctx context.Context, request CreateFunctionRequest) (*FunctionInfo, error) { var functionInfo FunctionInfo path := "/api/2.1preview/unity-catalog/functions" queryParams := make(map[string]any) @@ -726,7 +726,7 @@ func (a *functionsPreviewImpl) Create(ctx context.Context, request CreateFunctio return &functionInfo, err } -func (a *functionsPreviewImpl) Delete(ctx context.Context, request DeleteFunctionRequest) error { +func (a *functionsImpl) Delete(ctx context.Context, request DeleteFunctionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) queryParams := make(map[string]any) @@ -736,7 +736,7 @@ func (a *functionsPreviewImpl) Delete(ctx context.Context, request DeleteFunctio return err } -func (a *functionsPreviewImpl) Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error) { +func (a *functionsImpl) Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error) { var functionInfo FunctionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) queryParams := make(map[string]any) @@ -755,7 +755,7 @@ func (a *functionsPreviewImpl) Get(ctx context.Context, request GetFunctionReque // functions for which either the user has the **EXECUTE** privilege or the user // is the owner. There is no guarantee of a specific ordering of the elements in // the array. 
-func (a *functionsPreviewImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { +func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -788,11 +788,11 @@ func (a *functionsPreviewImpl) List(ctx context.Context, request ListFunctionsRe // functions for which either the user has the **EXECUTE** privilege or the user // is the owner. There is no guarantee of a specific ordering of the elements in // the array. -func (a *functionsPreviewImpl) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) { +func (a *functionsImpl) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[FunctionInfo](ctx, iterator) } -func (a *functionsPreviewImpl) internalList(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { +func (a *functionsImpl) internalList(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { var listFunctionsResponse ListFunctionsResponse path := "/api/2.1preview/unity-catalog/functions" queryParams := make(map[string]any) @@ -802,7 +802,7 @@ func (a *functionsPreviewImpl) internalList(ctx context.Context, request ListFun return &listFunctionsResponse, err } -func (a *functionsPreviewImpl) Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error) { +func (a *functionsImpl) Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error) { var functionInfo FunctionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/functions/%v", request.Name) queryParams := make(map[string]any) @@ -813,12 +813,12 @@ func (a *functionsPreviewImpl) Update(ctx context.Context, request UpdateFunctio return &functionInfo, err } -// unexported type that holds implementations of just GrantsPreview API methods -type grantsPreviewImpl struct { +// unexported type that holds implementations of just Grants API methods +type grantsImpl struct { client *client.DatabricksClient } -func (a *grantsPreviewImpl) Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error) { +func (a *grantsImpl) Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error) { var permissionsList PermissionsList path := fmt.Sprintf("/api/2.1preview/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) queryParams := make(map[string]any) @@ -828,7 +828,7 @@ func (a *grantsPreviewImpl) Get(ctx context.Context, request GetGrantRequest) (* return &permissionsList, err } -func (a *grantsPreviewImpl) GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error) { +func (a *grantsImpl) GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error) { var effectivePermissionsList EffectivePermissionsList path := fmt.Sprintf("/api/2.1preview/unity-catalog/effective-permissions/%v/%v", request.SecurableType, request.FullName) queryParams := make(map[string]any) @@ -838,7 +838,7 @@ func (a *grantsPreviewImpl) GetEffective(ctx context.Context, request GetEffecti return &effectivePermissionsList, err } -func (a *grantsPreviewImpl) Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error) { +func (a *grantsImpl) Update(ctx context.Context, request 
UpdatePermissions) (*PermissionsList, error) { var permissionsList PermissionsList path := fmt.Sprintf("/api/2.1preview/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) queryParams := make(map[string]any) @@ -849,12 +849,12 @@ func (a *grantsPreviewImpl) Update(ctx context.Context, request UpdatePermission return &permissionsList, err } -// unexported type that holds implementations of just MetastoresPreview API methods -type metastoresPreviewImpl struct { +// unexported type that holds implementations of just Metastores API methods +type metastoresImpl struct { client *client.DatabricksClient } -func (a *metastoresPreviewImpl) Assign(ctx context.Context, request CreateMetastoreAssignment) error { +func (a *metastoresImpl) Assign(ctx context.Context, request CreateMetastoreAssignment) error { var assignResponse AssignResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) queryParams := make(map[string]any) @@ -865,7 +865,7 @@ func (a *metastoresPreviewImpl) Assign(ctx context.Context, request CreateMetast return err } -func (a *metastoresPreviewImpl) Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error) { +func (a *metastoresImpl) Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := "/api/2.1preview/unity-catalog/metastores" queryParams := make(map[string]any) @@ -876,7 +876,7 @@ func (a *metastoresPreviewImpl) Create(ctx context.Context, request CreateMetast return &metastoreInfo, err } -func (a *metastoresPreviewImpl) Current(ctx context.Context) (*MetastoreAssignment, error) { +func (a *metastoresImpl) Current(ctx context.Context) (*MetastoreAssignment, error) { var metastoreAssignment MetastoreAssignment path := "/api/2.1preview/unity-catalog/current-metastore-assignment" @@ -886,7 +886,7 @@ func (a *metastoresPreviewImpl) Current(ctx context.Context) (*MetastoreAssignme return &metastoreAssignment, err } -func (a *metastoresPreviewImpl) Delete(ctx context.Context, request DeleteMetastoreRequest) error { +func (a *metastoresImpl) Delete(ctx context.Context, request DeleteMetastoreRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) queryParams := make(map[string]any) @@ -896,7 +896,7 @@ func (a *metastoresPreviewImpl) Delete(ctx context.Context, request DeleteMetast return err } -func (a *metastoresPreviewImpl) Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error) { +func (a *metastoresImpl) Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) queryParams := make(map[string]any) @@ -911,7 +911,7 @@ func (a *metastoresPreviewImpl) Get(ctx context.Context, request GetMetastoreReq // Gets an array of the available metastores (as __MetastoreInfo__ objects). The // caller must be an admin to retrieve this info. There is no guarantee of a // specific ordering of the elements in the array. 
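Assign above binds a workspace to a metastore, and Current reads the binding back for the calling workspace. A hedged sketch of the assignment call; the CreateMetastoreAssignment field names are assumed to match the stable catalog package, since the struct body is not part of this hunk:

func assignMetastore(ctx context.Context, ms MetastoresInterface, workspaceID int64, metastoreID string) error {
	// Maps onto the workspaces/%v/metastore endpoint shown above.
	return ms.Assign(ctx, CreateMetastoreAssignment{
		WorkspaceId:        workspaceID, // assumed int64, as in the stable package
		MetastoreId:        metastoreID,
		DefaultCatalogName: "main", // hypothetical default catalog for the workspace
	})
}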
-func (a *metastoresPreviewImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { +func (a *metastoresImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { @@ -935,11 +935,11 @@ func (a *metastoresPreviewImpl) List(ctx context.Context) listing.Iterator[Metas // Gets an array of the available metastores (as __MetastoreInfo__ objects). The // caller must be an admin to retrieve this info. There is no guarantee of a // specific ordering of the elements in the array. -func (a *metastoresPreviewImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { +func (a *metastoresImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { iterator := a.List(ctx) return listing.ToSlice[MetastoreInfo](ctx, iterator) } -func (a *metastoresPreviewImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { +func (a *metastoresImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := "/api/2.1preview/unity-catalog/metastores" @@ -949,7 +949,7 @@ func (a *metastoresPreviewImpl) internalList(ctx context.Context) (*ListMetastor return &listMetastoresResponse, err } -func (a *metastoresPreviewImpl) Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error) { +func (a *metastoresImpl) Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error) { var getMetastoreSummaryResponse GetMetastoreSummaryResponse path := "/api/2.1preview/unity-catalog/metastore_summary" @@ -959,7 +959,7 @@ func (a *metastoresPreviewImpl) Summary(ctx context.Context) (*GetMetastoreSumma return &getMetastoreSummaryResponse, err } -func (a *metastoresPreviewImpl) Unassign(ctx context.Context, request UnassignRequest) error { +func (a *metastoresImpl) Unassign(ctx context.Context, request UnassignRequest) error { var unassignResponse UnassignResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) queryParams := make(map[string]any) @@ -969,7 +969,7 @@ func (a *metastoresPreviewImpl) Unassign(ctx context.Context, request UnassignRe return err } -func (a *metastoresPreviewImpl) Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error) { +func (a *metastoresImpl) Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v", request.Id) queryParams := make(map[string]any) @@ -980,7 +980,7 @@ func (a *metastoresPreviewImpl) Update(ctx context.Context, request UpdateMetast return &metastoreInfo, err } -func (a *metastoresPreviewImpl) UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error { +func (a *metastoresImpl) UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error { var updateAssignmentResponse UpdateAssignmentResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) queryParams := make(map[string]any) @@ -991,12 +991,12 @@ func (a *metastoresPreviewImpl) UpdateAssignment(ctx context.Context, request Up return err } -// unexported type that holds implementations of just ModelVersionsPreview API methods -type modelVersionsPreviewImpl struct { +// unexported type that holds implementations of just ModelVersions API methods +type modelVersionsImpl struct { client *client.DatabricksClient } -func (a *modelVersionsPreviewImpl) 
Delete(ctx context.Context, request DeleteModelVersionRequest) error { +func (a *modelVersionsImpl) Delete(ctx context.Context, request DeleteModelVersionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) queryParams := make(map[string]any) @@ -1005,7 +1005,7 @@ func (a *modelVersionsPreviewImpl) Delete(ctx context.Context, request DeleteMod return err } -func (a *modelVersionsPreviewImpl) Get(ctx context.Context, request GetModelVersionRequest) (*ModelVersionInfo, error) { +func (a *modelVersionsImpl) Get(ctx context.Context, request GetModelVersionRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) queryParams := make(map[string]any) @@ -1015,7 +1015,7 @@ func (a *modelVersionsPreviewImpl) Get(ctx context.Context, request GetModelVers return &modelVersionInfo, err } -func (a *modelVersionsPreviewImpl) GetByAlias(ctx context.Context, request GetByAliasRequest) (*ModelVersionInfo, error) { +func (a *modelVersionsImpl) GetByAlias(ctx context.Context, request GetByAliasRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) queryParams := make(map[string]any) @@ -1040,7 +1040,7 @@ func (a *modelVersionsPreviewImpl) GetByAlias(ctx context.Context, request GetBy // // There is no guarantee of a specific ordering of the elements in the response. // The elements in the response will not contain any aliases or tags. -func (a *modelVersionsPreviewImpl) List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo] { +func (a *modelVersionsImpl) List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo] { getNextPage := func(ctx context.Context, req ListModelVersionsRequest) (*ListModelVersionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1079,11 +1079,11 @@ func (a *modelVersionsPreviewImpl) List(ctx context.Context, request ListModelVe // // There is no guarantee of a specific ordering of the elements in the response. // The elements in the response will not contain any aliases or tags. 
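GetByAlias above resolves a human-facing alias to a concrete model version, the counterpart of SetAlias on the registered-models service later in this file. A small sketch, assuming ModelVersionInfo keeps the integer Version field it has in the stable catalog package:

func resolveAlias(ctx context.Context, mv ModelVersionsInterface, fullName, alias string) (int, error) {
	v, err := mv.GetByAlias(ctx, GetByAliasRequest{
		FullName: fullName, // e.g. "main.default.revenue_model" (hypothetical)
		Alias:    alias,    // e.g. "champion" (hypothetical)
	})
	if err != nil {
		return 0, err
	}
	return v.Version, nil
}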
-func (a *modelVersionsPreviewImpl) ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error) { +func (a *modelVersionsImpl) ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ModelVersionInfo](ctx, iterator) } -func (a *modelVersionsPreviewImpl) internalList(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { +func (a *modelVersionsImpl) internalList(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { var listModelVersionsResponse ListModelVersionsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions", request.FullName) queryParams := make(map[string]any) @@ -1093,7 +1093,7 @@ func (a *modelVersionsPreviewImpl) internalList(ctx context.Context, request Lis return &listModelVersionsResponse, err } -func (a *modelVersionsPreviewImpl) Update(ctx context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error) { +func (a *modelVersionsImpl) Update(ctx context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) queryParams := make(map[string]any) @@ -1104,12 +1104,12 @@ func (a *modelVersionsPreviewImpl) Update(ctx context.Context, request UpdateMod return &modelVersionInfo, err } -// unexported type that holds implementations of just OnlineTablesPreview API methods -type onlineTablesPreviewImpl struct { +// unexported type that holds implementations of just OnlineTables API methods +type onlineTablesImpl struct { client *client.DatabricksClient } -func (a *onlineTablesPreviewImpl) Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error) { +func (a *onlineTablesImpl) Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error) { var onlineTable OnlineTable path := "/api/2.0preview/online-tables" queryParams := make(map[string]any) @@ -1120,7 +1120,7 @@ func (a *onlineTablesPreviewImpl) Create(ctx context.Context, request CreateOnli return &onlineTable, err } -func (a *onlineTablesPreviewImpl) Delete(ctx context.Context, request DeleteOnlineTableRequest) error { +func (a *onlineTablesImpl) Delete(ctx context.Context, request DeleteOnlineTableRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/online-tables/%v", request.Name) queryParams := make(map[string]any) @@ -1130,7 +1130,7 @@ func (a *onlineTablesPreviewImpl) Delete(ctx context.Context, request DeleteOnli return err } -func (a *onlineTablesPreviewImpl) Get(ctx context.Context, request GetOnlineTableRequest) (*OnlineTable, error) { +func (a *onlineTablesImpl) Get(ctx context.Context, request GetOnlineTableRequest) (*OnlineTable, error) { var onlineTable OnlineTable path := fmt.Sprintf("/api/2.0preview/online-tables/%v", request.Name) queryParams := make(map[string]any) @@ -1140,12 +1140,12 @@ func (a *onlineTablesPreviewImpl) Get(ctx context.Context, request GetOnlineTabl return &onlineTable, err } -// unexported type that holds implementations of just QualityMonitorsPreview API methods -type qualityMonitorsPreviewImpl struct { +// unexported type that holds implementations of just QualityMonitors API methods +type qualityMonitorsImpl struct { client *client.DatabricksClient } -func (a *qualityMonitorsPreviewImpl) CancelRefresh(ctx 
context.Context, request CancelRefreshRequest) error { +func (a *qualityMonitorsImpl) CancelRefresh(ctx context.Context, request CancelRefreshRequest) error { var cancelRefreshResponse CancelRefreshResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes/%v/cancel", request.TableName, request.RefreshId) queryParams := make(map[string]any) @@ -1154,7 +1154,7 @@ func (a *qualityMonitorsPreviewImpl) CancelRefresh(ctx context.Context, request return err } -func (a *qualityMonitorsPreviewImpl) Create(ctx context.Context, request CreateMonitor) (*MonitorInfo, error) { +func (a *qualityMonitorsImpl) Create(ctx context.Context, request CreateMonitor) (*MonitorInfo, error) { var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) queryParams := make(map[string]any) @@ -1165,7 +1165,7 @@ func (a *qualityMonitorsPreviewImpl) Create(ctx context.Context, request CreateM return &monitorInfo, err } -func (a *qualityMonitorsPreviewImpl) Delete(ctx context.Context, request DeleteQualityMonitorRequest) error { +func (a *qualityMonitorsImpl) Delete(ctx context.Context, request DeleteQualityMonitorRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) queryParams := make(map[string]any) @@ -1174,7 +1174,7 @@ func (a *qualityMonitorsPreviewImpl) Delete(ctx context.Context, request DeleteQ return err } -func (a *qualityMonitorsPreviewImpl) Get(ctx context.Context, request GetQualityMonitorRequest) (*MonitorInfo, error) { +func (a *qualityMonitorsImpl) Get(ctx context.Context, request GetQualityMonitorRequest) (*MonitorInfo, error) { var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) queryParams := make(map[string]any) @@ -1184,7 +1184,7 @@ func (a *qualityMonitorsPreviewImpl) Get(ctx context.Context, request GetQuality return &monitorInfo, err } -func (a *qualityMonitorsPreviewImpl) GetRefresh(ctx context.Context, request GetRefreshRequest) (*MonitorRefreshInfo, error) { +func (a *qualityMonitorsImpl) GetRefresh(ctx context.Context, request GetRefreshRequest) (*MonitorRefreshInfo, error) { var monitorRefreshInfo MonitorRefreshInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes/%v", request.TableName, request.RefreshId) queryParams := make(map[string]any) @@ -1194,7 +1194,7 @@ func (a *qualityMonitorsPreviewImpl) GetRefresh(ctx context.Context, request Get return &monitorRefreshInfo, err } -func (a *qualityMonitorsPreviewImpl) ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) { +func (a *qualityMonitorsImpl) ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) { var monitorRefreshListResponse MonitorRefreshListResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes", request.TableName) queryParams := make(map[string]any) @@ -1204,7 +1204,7 @@ func (a *qualityMonitorsPreviewImpl) ListRefreshes(ctx context.Context, request return &monitorRefreshListResponse, err } -func (a *qualityMonitorsPreviewImpl) RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) { +func (a *qualityMonitorsImpl) RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) { var regenerateDashboardResponse 
RegenerateDashboardResponse path := fmt.Sprintf("/api/2.1preview/quality-monitoring/tables/%v/monitor/dashboard", request.TableName) queryParams := make(map[string]any) @@ -1215,7 +1215,7 @@ func (a *qualityMonitorsPreviewImpl) RegenerateDashboard(ctx context.Context, re return &regenerateDashboardResponse, err } -func (a *qualityMonitorsPreviewImpl) RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) { +func (a *qualityMonitorsImpl) RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) { var monitorRefreshInfo MonitorRefreshInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor/refreshes", request.TableName) queryParams := make(map[string]any) @@ -1225,7 +1225,7 @@ func (a *qualityMonitorsPreviewImpl) RunRefresh(ctx context.Context, request Run return &monitorRefreshInfo, err } -func (a *qualityMonitorsPreviewImpl) Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error) { +func (a *qualityMonitorsImpl) Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error) { var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/monitor", request.TableName) queryParams := make(map[string]any) @@ -1236,12 +1236,12 @@ func (a *qualityMonitorsPreviewImpl) Update(ctx context.Context, request UpdateM return &monitorInfo, err } -// unexported type that holds implementations of just RegisteredModelsPreview API methods -type registeredModelsPreviewImpl struct { +// unexported type that holds implementations of just RegisteredModels API methods +type registeredModelsImpl struct { client *client.DatabricksClient } -func (a *registeredModelsPreviewImpl) Create(ctx context.Context, request CreateRegisteredModelRequest) (*RegisteredModelInfo, error) { +func (a *registeredModelsImpl) Create(ctx context.Context, request CreateRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path := "/api/2.1preview/unity-catalog/models" queryParams := make(map[string]any) @@ -1252,7 +1252,7 @@ func (a *registeredModelsPreviewImpl) Create(ctx context.Context, request Create return &registeredModelInfo, err } -func (a *registeredModelsPreviewImpl) Delete(ctx context.Context, request DeleteRegisteredModelRequest) error { +func (a *registeredModelsImpl) Delete(ctx context.Context, request DeleteRegisteredModelRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) queryParams := make(map[string]any) @@ -1261,7 +1261,7 @@ func (a *registeredModelsPreviewImpl) Delete(ctx context.Context, request Delete return err } -func (a *registeredModelsPreviewImpl) DeleteAlias(ctx context.Context, request DeleteAliasRequest) error { +func (a *registeredModelsImpl) DeleteAlias(ctx context.Context, request DeleteAliasRequest) error { var deleteAliasResponse DeleteAliasResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) queryParams := make(map[string]any) @@ -1270,7 +1270,7 @@ func (a *registeredModelsPreviewImpl) DeleteAlias(ctx context.Context, request D return err } -func (a *registeredModelsPreviewImpl) Get(ctx context.Context, request GetRegisteredModelRequest) (*RegisteredModelInfo, error) { +func (a *registeredModelsImpl) Get(ctx context.Context, request GetRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path :=
fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) queryParams := make(map[string]any) @@ -1294,7 +1294,7 @@ func (a *registeredModelsPreviewImpl) Get(ctx context.Context, request GetRegist // parent schema. // // There is no guarantee of a specific ordering of the elements in the response. -func (a *registeredModelsPreviewImpl) List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo] { +func (a *registeredModelsImpl) List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo] { getNextPage := func(ctx context.Context, req ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1332,11 +1332,11 @@ func (a *registeredModelsPreviewImpl) List(ctx context.Context, request ListRegi // parent schema. // // There is no guarantee of a specific ordering of the elements in the response. -func (a *registeredModelsPreviewImpl) ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error) { +func (a *registeredModelsImpl) ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[RegisteredModelInfo](ctx, iterator) } -func (a *registeredModelsPreviewImpl) internalList(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { +func (a *registeredModelsImpl) internalList(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { var listRegisteredModelsResponse ListRegisteredModelsResponse path := "/api/2.1preview/unity-catalog/models" queryParams := make(map[string]any) @@ -1346,7 +1346,7 @@ func (a *registeredModelsPreviewImpl) internalList(ctx context.Context, request return &listRegisteredModelsResponse, err } -func (a *registeredModelsPreviewImpl) SetAlias(ctx context.Context, request SetRegisteredModelAliasRequest) (*RegisteredModelAlias, error) { +func (a *registeredModelsImpl) SetAlias(ctx context.Context, request SetRegisteredModelAliasRequest) (*RegisteredModelAlias, error) { var registeredModelAlias RegisteredModelAlias path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) queryParams := make(map[string]any) @@ -1357,7 +1357,7 @@ func (a *registeredModelsPreviewImpl) SetAlias(ctx context.Context, request SetR return ®isteredModelAlias, err } -func (a *registeredModelsPreviewImpl) Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error) { +func (a *registeredModelsImpl) Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/models/%v", request.FullName) queryParams := make(map[string]any) @@ -1368,12 +1368,12 @@ func (a *registeredModelsPreviewImpl) Update(ctx context.Context, request Update return ®isteredModelInfo, err } -// unexported type that holds implementations of just ResourceQuotasPreview API methods -type resourceQuotasPreviewImpl struct { +// unexported type that holds implementations of just ResourceQuotas API methods +type resourceQuotasImpl struct { client *client.DatabricksClient } -func (a *resourceQuotasPreviewImpl) GetQuota(ctx context.Context, request GetQuotaRequest) (*GetQuotaResponse, error) { +func (a *resourceQuotasImpl) GetQuota(ctx context.Context, 
request GetQuotaRequest) (*GetQuotaResponse, error) { var getQuotaResponse GetQuotaResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/resource-quotas/%v/%v/%v", request.ParentSecurableType, request.ParentFullName, request.QuotaName) queryParams := make(map[string]any) @@ -1388,7 +1388,7 @@ func (a *resourceQuotasPreviewImpl) GetQuota(ctx context.Context, request GetQuo // ListQuotas returns all quota values under the metastore. There are no SLAs on // the freshness of the counts returned. This API does not trigger a refresh of // quota counts. -func (a *resourceQuotasPreviewImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] { +func (a *resourceQuotasImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] { getNextPage := func(ctx context.Context, req ListQuotasRequest) (*ListQuotasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1417,11 +1417,11 @@ func (a *resourceQuotasPreviewImpl) ListQuotas(ctx context.Context, request List // ListQuotas returns all quota values under the metastore. There are no SLAs on // the freshness of the counts returned. This API does not trigger a refresh of // quota counts. -func (a *resourceQuotasPreviewImpl) ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) { +func (a *resourceQuotasImpl) ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) { iterator := a.ListQuotas(ctx, request) return listing.ToSlice[QuotaInfo](ctx, iterator) } -func (a *resourceQuotasPreviewImpl) internalListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { +func (a *resourceQuotasImpl) internalListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { var listQuotasResponse ListQuotasResponse path := "/api/2.1preview/unity-catalog/resource-quotas/all-resource-quotas" queryParams := make(map[string]any) @@ -1431,12 +1431,12 @@ func (a *resourceQuotasPreviewImpl) internalListQuotas(ctx context.Context, requ return &listQuotasResponse, err } -// unexported type that holds implementations of just SchemasPreview API methods -type schemasPreviewImpl struct { +// unexported type that holds implementations of just Schemas API methods +type schemasImpl struct { client *client.DatabricksClient } -func (a *schemasPreviewImpl) Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error) { +func (a *schemasImpl) Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := "/api/2.1preview/unity-catalog/schemas" queryParams := make(map[string]any) @@ -1447,7 +1447,7 @@ func (a *schemasPreviewImpl) Create(ctx context.Context, request CreateSchema) ( return &schemaInfo, err } -func (a *schemasPreviewImpl) Delete(ctx context.Context, request DeleteSchemaRequest) error { +func (a *schemasImpl) Delete(ctx context.Context, request DeleteSchemaRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) queryParams := make(map[string]any) @@ -1457,7 +1457,7 @@ func (a *schemasPreviewImpl) Delete(ctx context.Context, request DeleteSchemaReq return err } -func (a *schemasPreviewImpl) Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error) { +func (a *schemasImpl) Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := 
fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) queryParams := make(map[string]any) @@ -1474,7 +1474,7 @@ func (a *schemasPreviewImpl) Get(ctx context.Context, request GetSchemaRequest) // catalog will be retrieved. Otherwise, only schemas owned by the caller (or // for which the caller has the **USE_SCHEMA** privilege) will be retrieved. // There is no guarantee of a specific ordering of the elements in the array. -func (a *schemasPreviewImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { +func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1505,11 +1505,11 @@ func (a *schemasPreviewImpl) List(ctx context.Context, request ListSchemasReques // catalog will be retrieved. Otherwise, only schemas owned by the caller (or // for which the caller has the **USE_SCHEMA** privilege) will be retrieved. // There is no guarantee of a specific ordering of the elements in the array. -func (a *schemasPreviewImpl) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) { +func (a *schemasImpl) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[SchemaInfo](ctx, iterator) } -func (a *schemasPreviewImpl) internalList(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { +func (a *schemasImpl) internalList(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { var listSchemasResponse ListSchemasResponse path := "/api/2.1preview/unity-catalog/schemas" queryParams := make(map[string]any) @@ -1519,7 +1519,7 @@ func (a *schemasPreviewImpl) internalList(ctx context.Context, request ListSchem return &listSchemasResponse, err } -func (a *schemasPreviewImpl) Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error) { +func (a *schemasImpl) Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/schemas/%v", request.FullName) queryParams := make(map[string]any) @@ -1530,12 +1530,12 @@ func (a *schemasPreviewImpl) Update(ctx context.Context, request UpdateSchema) ( return &schemaInfo, err } -// unexported type that holds implementations of just StorageCredentialsPreview API methods -type storageCredentialsPreviewImpl struct { +// unexported type that holds implementations of just StorageCredentials API methods +type storageCredentialsImpl struct { client *client.DatabricksClient } -func (a *storageCredentialsPreviewImpl) Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error) { +func (a *storageCredentialsImpl) Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := "/api/2.1preview/unity-catalog/storage-credentials" queryParams := make(map[string]any) @@ -1546,7 +1546,7 @@ func (a *storageCredentialsPreviewImpl) Create(ctx context.Context, request Crea return &storageCredentialInfo, err } -func (a *storageCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteStorageCredentialRequest) error { +func (a *storageCredentialsImpl) Delete(ctx context.Context, request DeleteStorageCredentialRequest) error { var deleteResponse DeleteResponse path 
:= fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) queryParams := make(map[string]any) @@ -1556,7 +1556,7 @@ func (a *storageCredentialsPreviewImpl) Delete(ctx context.Context, request Dele return err } -func (a *storageCredentialsPreviewImpl) Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error) { +func (a *storageCredentialsImpl) Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) queryParams := make(map[string]any) @@ -1573,7 +1573,7 @@ func (a *storageCredentialsPreviewImpl) Get(ctx context.Context, request GetStor // permission to access. If the caller is a metastore admin, retrieval of // credentials is unrestricted. There is no guarantee of a specific ordering of // the elements in the array. -func (a *storageCredentialsPreviewImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { +func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1604,11 +1604,11 @@ func (a *storageCredentialsPreviewImpl) List(ctx context.Context, request ListSt // permission to access. If the caller is a metastore admin, retrieval of // credentials is unrestricted. There is no guarantee of a specific ordering of // the elements in the array. -func (a *storageCredentialsPreviewImpl) ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) { +func (a *storageCredentialsImpl) ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[StorageCredentialInfo](ctx, iterator) } -func (a *storageCredentialsPreviewImpl) internalList(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { +func (a *storageCredentialsImpl) internalList(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { var listStorageCredentialsResponse ListStorageCredentialsResponse path := "/api/2.1preview/unity-catalog/storage-credentials" queryParams := make(map[string]any) @@ -1618,7 +1618,7 @@ func (a *storageCredentialsPreviewImpl) internalList(ctx context.Context, reques return &listStorageCredentialsResponse, err } -func (a *storageCredentialsPreviewImpl) Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error) { +func (a *storageCredentialsImpl) Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/storage-credentials/%v", request.Name) queryParams := make(map[string]any) @@ -1629,7 +1629,7 @@ func (a *storageCredentialsPreviewImpl) Update(ctx context.Context, request Upda return &storageCredentialInfo, err } -func (a *storageCredentialsPreviewImpl) Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error) { +func (a *storageCredentialsImpl) Validate(ctx context.Context, request ValidateStorageCredential) 
(*ValidateStorageCredentialResponse, error) { var validateStorageCredentialResponse ValidateStorageCredentialResponse path := "/api/2.1preview/unity-catalog/validate-storage-credentials" queryParams := make(map[string]any) @@ -1640,12 +1640,12 @@ func (a *storageCredentialsPreviewImpl) Validate(ctx context.Context, request Va return &validateStorageCredentialResponse, err } -// unexported type that holds implementations of just SystemSchemasPreview API methods -type systemSchemasPreviewImpl struct { +// unexported type that holds implementations of just SystemSchemas API methods +type systemSchemasImpl struct { client *client.DatabricksClient } -func (a *systemSchemasPreviewImpl) Disable(ctx context.Context, request DisableRequest) error { +func (a *systemSchemasImpl) Disable(ctx context.Context, request DisableRequest) error { var disableResponse DisableResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) queryParams := make(map[string]any) @@ -1655,7 +1655,7 @@ func (a *systemSchemasPreviewImpl) Disable(ctx context.Context, request DisableR return err } -func (a *systemSchemasPreviewImpl) Enable(ctx context.Context, request EnableRequest) error { +func (a *systemSchemasImpl) Enable(ctx context.Context, request EnableRequest) error { var enableResponse EnableResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) queryParams := make(map[string]any) @@ -1669,7 +1669,7 @@ func (a *systemSchemasPreviewImpl) Enable(ctx context.Context, request EnableReq // // Gets an array of system schemas for a metastore. The caller must be an // account admin or a metastore admin. -func (a *systemSchemasPreviewImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { +func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1697,11 +1697,11 @@ func (a *systemSchemasPreviewImpl) List(ctx context.Context, request ListSystemS // // Gets an array of system schemas for a metastore. The caller must be an // account admin or a metastore admin. 
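Enable and Disable above toggle a single system schema per metastore, and the paginated List (with its ListAll convenience just below) reports the current state of each. A sketch that enables one schema and prints the resulting states; the Schema and State field names on SystemSchemaInfo are assumed from the stable catalog package:

func enableBillingSchema(ctx context.Context, ss SystemSchemasInterface, metastoreID string) error {
	if err := ss.Enable(ctx, EnableRequest{
		MetastoreId: metastoreID,
		SchemaName:  "billing", // hypothetical system schema name
	}); err != nil {
		return err
	}
	schemas, err := ss.ListAll(ctx, ListSystemSchemasRequest{MetastoreId: metastoreID})
	if err != nil {
		return err
	}
	for _, s := range schemas {
		fmt.Printf("%s: %s\n", s.Schema, s.State)
	}
	return nil
}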
-func (a *systemSchemasPreviewImpl) ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) { +func (a *systemSchemasImpl) ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[SystemSchemaInfo](ctx, iterator) } -func (a *systemSchemasPreviewImpl) internalList(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { +func (a *systemSchemasImpl) internalList(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { var listSystemSchemasResponse ListSystemSchemasResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/metastores/%v/systemschemas", request.MetastoreId) queryParams := make(map[string]any) @@ -1711,12 +1711,12 @@ func (a *systemSchemasPreviewImpl) internalList(ctx context.Context, request Lis return &listSystemSchemasResponse, err } -// unexported type that holds implementations of just TableConstraintsPreview API methods -type tableConstraintsPreviewImpl struct { +// unexported type that holds implementations of just TableConstraints API methods +type tableConstraintsImpl struct { client *client.DatabricksClient } -func (a *tableConstraintsPreviewImpl) Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error) { +func (a *tableConstraintsImpl) Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error) { var tableConstraint TableConstraint path := "/api/2.1preview/unity-catalog/constraints" queryParams := make(map[string]any) @@ -1727,7 +1727,7 @@ func (a *tableConstraintsPreviewImpl) Create(ctx context.Context, request Create return &tableConstraint, err } -func (a *tableConstraintsPreviewImpl) Delete(ctx context.Context, request DeleteTableConstraintRequest) error { +func (a *tableConstraintsImpl) Delete(ctx context.Context, request DeleteTableConstraintRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/constraints/%v", request.FullName) queryParams := make(map[string]any) @@ -1737,12 +1737,12 @@ func (a *tableConstraintsPreviewImpl) Delete(ctx context.Context, request Delete return err } -// unexported type that holds implementations of just TablesPreview API methods -type tablesPreviewImpl struct { +// unexported type that holds implementations of just Tables API methods +type tablesImpl struct { client *client.DatabricksClient } -func (a *tablesPreviewImpl) Delete(ctx context.Context, request DeleteTableRequest) error { +func (a *tablesImpl) Delete(ctx context.Context, request DeleteTableRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) queryParams := make(map[string]any) @@ -1752,7 +1752,7 @@ func (a *tablesPreviewImpl) Delete(ctx context.Context, request DeleteTableReque return err } -func (a *tablesPreviewImpl) Exists(ctx context.Context, request ExistsRequest) (*TableExistsResponse, error) { +func (a *tablesImpl) Exists(ctx context.Context, request ExistsRequest) (*TableExistsResponse, error) { var tableExistsResponse TableExistsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v/exists", request.FullName) queryParams := make(map[string]any) @@ -1762,7 +1762,7 @@ func (a *tablesPreviewImpl) Exists(ctx context.Context, request ExistsRequest) ( return &tableExistsResponse, err } -func (a *tablesPreviewImpl) Get(ctx context.Context, request GetTableRequest) (*TableInfo, 
error) { +func (a *tablesImpl) Get(ctx context.Context, request GetTableRequest) (*TableInfo, error) { var tableInfo TableInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) queryParams := make(map[string]any) @@ -1780,7 +1780,7 @@ func (a *tablesPreviewImpl) Get(ctx context.Context, request GetTableRequest) (* // must also be the owner or have the **USE_CATALOG** privilege on the parent // catalog and the **USE_SCHEMA** privilege on the parent schema. There is no // guarantee of a specific ordering of the elements in the array. -func (a *tablesPreviewImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { +func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1812,11 +1812,11 @@ func (a *tablesPreviewImpl) List(ctx context.Context, request ListTablesRequest) // must also be the owner or have the **USE_CATALOG** privilege on the parent // catalog and the **USE_SCHEMA** privilege on the parent schema. There is no // guarantee of a specific ordering of the elements in the array. -func (a *tablesPreviewImpl) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) { +func (a *tablesImpl) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[TableInfo](ctx, iterator) } -func (a *tablesPreviewImpl) internalList(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { +func (a *tablesImpl) internalList(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { var listTablesResponse ListTablesResponse path := "/api/2.1preview/unity-catalog/tables" queryParams := make(map[string]any) @@ -1839,7 +1839,7 @@ func (a *tablesPreviewImpl) internalList(ctx context.Context, request ListTables // ownership or the **USE_CATALOG** privilege on the parent catalog. // // There is no guarantee of a specific ordering of the elements in the array. -func (a *tablesPreviewImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] { +func (a *tablesImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] { getNextPage := func(ctx context.Context, req ListSummariesRequest) (*ListTableSummariesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1876,11 +1876,11 @@ func (a *tablesPreviewImpl) ListSummaries(ctx context.Context, request ListSumma // ownership or the **USE_CATALOG** privilege on the parent catalog. // // There is no guarantee of a specific ordering of the elements in the array. 
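ListSummaries above trades detail for throughput: it yields lightweight TableSummary records rather than full TableInfo, which is the cheaper call when scanning a large catalog (ListSummariesAll, just below, is the collect-to-slice variant). A consumption sketch; the request pattern fields and the TableSummary fields are assumed from the stable catalog package:

func scanSummaries(ctx context.Context, tables TablesInterface) error {
	it := tables.ListSummaries(ctx, ListSummariesRequest{
		CatalogName:       "main",   // hypothetical catalog
		SchemaNamePattern: "prod_%", // assumed SQL LIKE-style pattern
	})
	for it.HasNext(ctx) {
		s, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("%s (%s)\n", s.FullName, s.TableType)
	}
	return nil
}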
-func (a *tablesPreviewImpl) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) { +func (a *tablesImpl) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) { iterator := a.ListSummaries(ctx, request) return listing.ToSlice[TableSummary](ctx, iterator) } -func (a *tablesPreviewImpl) internalListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { +func (a *tablesImpl) internalListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { var listTableSummariesResponse ListTableSummariesResponse path := "/api/2.1preview/unity-catalog/table-summaries" queryParams := make(map[string]any) @@ -1890,7 +1890,7 @@ func (a *tablesPreviewImpl) internalListSummaries(ctx context.Context, request L return &listTableSummariesResponse, err } -func (a *tablesPreviewImpl) Update(ctx context.Context, request UpdateTableRequest) error { +func (a *tablesImpl) Update(ctx context.Context, request UpdateTableRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/tables/%v", request.FullName) queryParams := make(map[string]any) @@ -1901,12 +1901,12 @@ func (a *tablesPreviewImpl) Update(ctx context.Context, request UpdateTableReque return err } -// unexported type that holds implementations of just TemporaryTableCredentialsPreview API methods -type temporaryTableCredentialsPreviewImpl struct { +// unexported type that holds implementations of just TemporaryTableCredentials API methods +type temporaryTableCredentialsImpl struct { client *client.DatabricksClient } -func (a *temporaryTableCredentialsPreviewImpl) GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) { +func (a *temporaryTableCredentialsImpl) GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) { var generateTemporaryTableCredentialResponse GenerateTemporaryTableCredentialResponse path := "/api/2.0preview/unity-catalog/temporary-table-credentials" queryParams := make(map[string]any) @@ -1917,12 +1917,12 @@ func (a *temporaryTableCredentialsPreviewImpl) GenerateTemporaryTableCredentials return &generateTemporaryTableCredentialResponse, err } -// unexported type that holds implementations of just VolumesPreview API methods -type volumesPreviewImpl struct { +// unexported type that holds implementations of just Volumes API methods +type volumesImpl struct { client *client.DatabricksClient } -func (a *volumesPreviewImpl) Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error) { +func (a *volumesImpl) Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := "/api/2.1preview/unity-catalog/volumes" queryParams := make(map[string]any) @@ -1933,7 +1933,7 @@ func (a *volumesPreviewImpl) Create(ctx context.Context, request CreateVolumeReq return &volumeInfo, err } -func (a *volumesPreviewImpl) Delete(ctx context.Context, request DeleteVolumeRequest) error { +func (a *volumesImpl) Delete(ctx context.Context, request DeleteVolumeRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name) queryParams := make(map[string]any) @@ -1955,7 +1955,7 @@ func (a *volumesPreviewImpl) Delete(ctx 
context.Context, request DeleteVolumeReq // parent catalog and the **USE_SCHEMA** privilege on the parent schema. // // There is no guarantee of a specific ordering of the elements in the array. -func (a *volumesPreviewImpl) List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] { +func (a *volumesImpl) List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] { getNextPage := func(ctx context.Context, req ListVolumesRequest) (*ListVolumesResponseContent, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1992,11 +1992,11 @@ func (a *volumesPreviewImpl) List(ctx context.Context, request ListVolumesReques // parent catalog and the **USE_SCHEMA** privilege on the parent schema. // // There is no guarantee of a specific ordering of the elements in the array. -func (a *volumesPreviewImpl) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) { +func (a *volumesImpl) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[VolumeInfo](ctx, iterator) } -func (a *volumesPreviewImpl) internalList(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) { +func (a *volumesImpl) internalList(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) { var listVolumesResponseContent ListVolumesResponseContent path := "/api/2.1preview/unity-catalog/volumes" queryParams := make(map[string]any) @@ -2006,7 +2006,7 @@ func (a *volumesPreviewImpl) internalList(ctx context.Context, request ListVolum return &listVolumesResponseContent, err } -func (a *volumesPreviewImpl) Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error) { +func (a *volumesImpl) Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name) queryParams := make(map[string]any) @@ -2016,7 +2016,7 @@ func (a *volumesPreviewImpl) Read(ctx context.Context, request ReadVolumeRequest return &volumeInfo, err } -func (a *volumesPreviewImpl) Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error) { +func (a *volumesImpl) Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/volumes/%v", request.Name) queryParams := make(map[string]any) @@ -2027,12 +2027,12 @@ func (a *volumesPreviewImpl) Update(ctx context.Context, request UpdateVolumeReq return &volumeInfo, err } -// unexported type that holds implementations of just WorkspaceBindingsPreview API methods -type workspaceBindingsPreviewImpl struct { +// unexported type that holds implementations of just WorkspaceBindings API methods +type workspaceBindingsImpl struct { client *client.DatabricksClient } -func (a *workspaceBindingsPreviewImpl) Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error) { +func (a *workspaceBindingsImpl) Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error) { var currentWorkspaceBindings CurrentWorkspaceBindings path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspace-bindings/catalogs/%v", request.Name) queryParams := make(map[string]any) @@ -2046,7 +2046,7 @@ func (a *workspaceBindingsPreviewImpl) Get(ctx context.Context, request GetWorks // // Gets workspace bindings of the securable. 
The caller must be a metastore // admin or an owner of the securable. -func (a *workspaceBindingsPreviewImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { +func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { getNextPage := func(ctx context.Context, req GetBindingsRequest) (*WorkspaceBindingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -2074,11 +2074,11 @@ func (a *workspaceBindingsPreviewImpl) GetBindings(ctx context.Context, request // // Gets workspace bindings of the securable. The caller must be a metastore // admin or an owner of the securable. -func (a *workspaceBindingsPreviewImpl) GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) { +func (a *workspaceBindingsImpl) GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) { iterator := a.GetBindings(ctx, request) return listing.ToSlice[WorkspaceBinding](ctx, iterator) } -func (a *workspaceBindingsPreviewImpl) internalGetBindings(ctx context.Context, request GetBindingsRequest) (*WorkspaceBindingsResponse, error) { +func (a *workspaceBindingsImpl) internalGetBindings(ctx context.Context, request GetBindingsRequest) (*WorkspaceBindingsResponse, error) { var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) queryParams := make(map[string]any) @@ -2088,7 +2088,7 @@ func (a *workspaceBindingsPreviewImpl) internalGetBindings(ctx context.Context, return &workspaceBindingsResponse, err } -func (a *workspaceBindingsPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error) { +func (a *workspaceBindingsImpl) Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error) { var currentWorkspaceBindings CurrentWorkspaceBindings path := fmt.Sprintf("/api/2.1preview/unity-catalog/workspace-bindings/catalogs/%v", request.Name) queryParams := make(map[string]any) @@ -2099,7 +2099,7 @@ func (a *workspaceBindingsPreviewImpl) Update(ctx context.Context, request Updat return &currentWorkspaceBindings, err } -func (a *workspaceBindingsPreviewImpl) UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error) { +func (a *workspaceBindingsImpl) UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error) { var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) queryParams := make(map[string]any) diff --git a/cleanrooms/v2preview/api.go b/cleanrooms/v2preview/api.go index 0d568e973..97a02d73a 100755 --- a/cleanrooms/v2preview/api.go +++ b/cleanrooms/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Clean Room Assets Preview, Clean Room Task Runs Preview, Clean Rooms Preview, etc. +// These APIs allow you to manage Clean Room Assets, Clean Room Task Runs, Clean Rooms, etc. package cleanroomspreview import ( @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type CleanRoomAssetsPreviewInterface interface { +type CleanRoomAssetsInterface interface { // Create an asset.
// @@ -61,9 +61,9 @@ type CleanRoomAssetsPreviewInterface interface { Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) } -func NewCleanRoomAssetsPreview(client *client.DatabricksClient) *CleanRoomAssetsPreviewAPI { - return &CleanRoomAssetsPreviewAPI{ - cleanRoomAssetsPreviewImpl: cleanRoomAssetsPreviewImpl{ +func NewCleanRoomAssets(client *client.DatabricksClient) *CleanRoomAssetsAPI { + return &CleanRoomAssetsAPI{ + cleanRoomAssetsImpl: cleanRoomAssetsImpl{ client: client, }, } @@ -71,15 +71,15 @@ func NewCleanRoomAssetsPreview(client *client.DatabricksClient) *CleanRoomAssets // Clean room assets are data and code objects — Tables, volumes, and // notebooks that are shared with the clean room. -type CleanRoomAssetsPreviewAPI struct { - cleanRoomAssetsPreviewImpl +type CleanRoomAssetsAPI struct { + cleanRoomAssetsImpl } // Delete an asset. // // Delete a clean room asset - unshare/remove the asset from the clean room -func (a *CleanRoomAssetsPreviewAPI) DeleteByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) error { - return a.cleanRoomAssetsPreviewImpl.Delete(ctx, DeleteCleanRoomAssetRequest{ +func (a *CleanRoomAssetsAPI) DeleteByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) error { + return a.cleanRoomAssetsImpl.Delete(ctx, DeleteCleanRoomAssetRequest{ CleanRoomName: cleanRoomName, AssetType: assetType, AssetFullName: assetFullName, @@ -89,8 +89,8 @@ func (a *CleanRoomAssetsPreviewAPI) DeleteByCleanRoomNameAndAssetTypeAndAssetFul // Get an asset. // // Get the details of a clean room asset by its type and full name. -func (a *CleanRoomAssetsPreviewAPI) GetByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) (*CleanRoomAsset, error) { - return a.cleanRoomAssetsPreviewImpl.Get(ctx, GetCleanRoomAssetRequest{ +func (a *CleanRoomAssetsAPI) GetByCleanRoomNameAndAssetTypeAndAssetFullName(ctx context.Context, cleanRoomName string, assetType CleanRoomAssetAssetType, assetFullName string) (*CleanRoomAsset, error) { + return a.cleanRoomAssetsImpl.Get(ctx, GetCleanRoomAssetRequest{ CleanRoomName: cleanRoomName, AssetType: assetType, AssetFullName: assetFullName, @@ -98,13 +98,13 @@ func (a *CleanRoomAssetsPreviewAPI) GetByCleanRoomNameAndAssetTypeAndAssetFullNa } // List assets. -func (a *CleanRoomAssetsPreviewAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomAssetsResponse, error) { - return a.cleanRoomAssetsPreviewImpl.internalList(ctx, ListCleanRoomAssetsRequest{ +func (a *CleanRoomAssetsAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomAssetsResponse, error) { + return a.cleanRoomAssetsImpl.internalList(ctx, ListCleanRoomAssetsRequest{ CleanRoomName: cleanRoomName, }) } -type CleanRoomTaskRunsPreviewInterface interface { +type CleanRoomTaskRunsInterface interface { // List notebook task runs. 
// @@ -126,29 +126,29 @@ type CleanRoomTaskRunsPreviewInterface interface { ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) } -func NewCleanRoomTaskRunsPreview(client *client.DatabricksClient) *CleanRoomTaskRunsPreviewAPI { - return &CleanRoomTaskRunsPreviewAPI{ - cleanRoomTaskRunsPreviewImpl: cleanRoomTaskRunsPreviewImpl{ +func NewCleanRoomTaskRuns(client *client.DatabricksClient) *CleanRoomTaskRunsAPI { + return &CleanRoomTaskRunsAPI{ + cleanRoomTaskRunsImpl: cleanRoomTaskRunsImpl{ client: client, }, } } // Clean room task runs are the executions of notebooks in a clean room. -type CleanRoomTaskRunsPreviewAPI struct { - cleanRoomTaskRunsPreviewImpl +type CleanRoomTaskRunsAPI struct { + cleanRoomTaskRunsImpl } // List notebook task runs. // // List all the historical notebook task runs in a clean room. -func (a *CleanRoomTaskRunsPreviewAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) { - return a.cleanRoomTaskRunsPreviewImpl.internalList(ctx, ListCleanRoomNotebookTaskRunsRequest{ +func (a *CleanRoomTaskRunsAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) { + return a.cleanRoomTaskRunsImpl.internalList(ctx, ListCleanRoomNotebookTaskRunsRequest{ CleanRoomName: cleanRoomName, }) } -type CleanRoomsPreviewInterface interface { +type CleanRoomsInterface interface { // Create a clean room. // @@ -220,9 +220,9 @@ type CleanRoomsPreviewInterface interface { Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) } -func NewCleanRoomsPreview(client *client.DatabricksClient) *CleanRoomsPreviewAPI { - return &CleanRoomsPreviewAPI{ - cleanRoomsPreviewImpl: cleanRoomsPreviewImpl{ +func NewCleanRooms(client *client.DatabricksClient) *CleanRoomsAPI { + return &CleanRoomsAPI{ + cleanRoomsImpl: cleanRoomsImpl{ client: client, }, } @@ -231,8 +231,8 @@ func NewCleanRoomsPreview(client *client.DatabricksClient) *CleanRoomsPreviewAPI // A clean room uses Delta Sharing and serverless compute to provide a secure // and privacy-protecting environment where multiple parties can work together // on sensitive enterprise data without direct access to each other’s data. -type CleanRoomsPreviewAPI struct { - cleanRoomsPreviewImpl +type CleanRoomsAPI struct { + cleanRoomsImpl } // Delete a clean room. @@ -241,8 +241,8 @@ type CleanRoomsPreviewAPI struct { // metastore. If the other collaborators have not deleted the clean room, they // will still have the clean room in their metastore, but it will be in a // DELETED state and no operations other than deletion can be performed on it. -func (a *CleanRoomsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.cleanRoomsPreviewImpl.Delete(ctx, DeleteCleanRoomRequest{ +func (a *CleanRoomsAPI) DeleteByName(ctx context.Context, name string) error { + return a.cleanRoomsImpl.Delete(ctx, DeleteCleanRoomRequest{ Name: name, }) } @@ -250,8 +250,8 @@ func (a *CleanRoomsPreviewAPI) DeleteByName(ctx context.Context, name string) er // Get a clean room. // // Get the details of a clean room given its name. 
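DeleteByName above (and GetByName just below) illustrate the other half of the generated surface: the *API structs layer ByName-style wrappers over the request-struct methods so call sites do not have to build one-field requests by hand. A hedged sketch combining the two; the Name field on CleanRoom is assumed:

func dropCleanRoom(ctx context.Context, rooms CleanRoomsInterface, name string) error {
	room, err := rooms.GetByName(ctx, name) // sugar for Get(ctx, GetCleanRoomRequest{Name: name})
	if err != nil {
		return err
	}
	fmt.Printf("deleting clean room %q\n", room.Name)
	// Deletion only removes the clean room from this metastore; other
	// collaborators see it transition to a DELETED state (per the doc
	// comment above).
	return rooms.DeleteByName(ctx, name)
}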
-func (a *CleanRoomsPreviewAPI) GetByName(ctx context.Context, name string) (*CleanRoom, error) { - return a.cleanRoomsPreviewImpl.Get(ctx, GetCleanRoomRequest{ +func (a *CleanRoomsAPI) GetByName(ctx context.Context, name string) (*CleanRoom, error) { + return a.cleanRoomsImpl.Get(ctx, GetCleanRoomRequest{ Name: name, }) } diff --git a/cleanrooms/v2preview/client.go b/cleanrooms/v2preview/client.go index e003bd7c9..9ca3f89a1 100755 --- a/cleanrooms/v2preview/client.go +++ b/cleanrooms/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type CleanRoomAssetsPreviewClient struct { - CleanRoomAssetsPreviewInterface +type CleanRoomAssetsClient struct { + CleanRoomAssetsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCleanRoomAssetsPreviewClient(cfg *config.Config) (*CleanRoomAssetsPreviewClient, error) { +func NewCleanRoomAssetsClient(cfg *config.Config) (*CleanRoomAssetsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewCleanRoomAssetsPreviewClient(cfg *config.Config) (*CleanRoomAssetsPrevie return nil, err } - return &CleanRoomAssetsPreviewClient{ - Config: cfg, - apiClient: apiClient, - CleanRoomAssetsPreviewInterface: NewCleanRoomAssetsPreview(databricksClient), + return &CleanRoomAssetsClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomAssetsInterface: NewCleanRoomAssets(databricksClient), }, nil } -type CleanRoomTaskRunsPreviewClient struct { - CleanRoomTaskRunsPreviewInterface +type CleanRoomTaskRunsClient struct { + CleanRoomTaskRunsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCleanRoomTaskRunsPreviewClient(cfg *config.Config) (*CleanRoomTaskRunsPreviewClient, error) { +func NewCleanRoomTaskRunsClient(cfg *config.Config) (*CleanRoomTaskRunsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewCleanRoomTaskRunsPreviewClient(cfg *config.Config) (*CleanRoomTaskRunsPr return nil, err } - return &CleanRoomTaskRunsPreviewClient{ - Config: cfg, - apiClient: apiClient, - CleanRoomTaskRunsPreviewInterface: NewCleanRoomTaskRunsPreview(databricksClient), + return &CleanRoomTaskRunsClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomTaskRunsInterface: NewCleanRoomTaskRuns(databricksClient), }, nil } -type CleanRoomsPreviewClient struct { - CleanRoomsPreviewInterface +type CleanRoomsClient struct { + CleanRoomsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCleanRoomsPreviewClient(cfg *config.Config) (*CleanRoomsPreviewClient, error) { +func NewCleanRoomsClient(cfg *config.Config) (*CleanRoomsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,9 +105,9 @@ func NewCleanRoomsPreviewClient(cfg *config.Config) (*CleanRoomsPreviewClient, e return nil, err } - return &CleanRoomsPreviewClient{ - Config: cfg, - apiClient: apiClient, - CleanRoomsPreviewInterface: NewCleanRoomsPreview(databricksClient), + return &CleanRoomsClient{ + Config: cfg, + apiClient: apiClient, + CleanRoomsInterface: NewCleanRooms(databricksClient), }, nil } diff --git a/cleanrooms/v2preview/impl.go b/cleanrooms/v2preview/impl.go index 7c4903a52..af70d174d 100755 --- a/cleanrooms/v2preview/impl.go +++ b/cleanrooms/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just CleanRoomAssetsPreview API methods -type cleanRoomAssetsPreviewImpl struct { +// unexported type that holds 
implementations of just CleanRoomAssets API methods +type cleanRoomAssetsImpl struct { client *client.DatabricksClient } -func (a *cleanRoomAssetsPreviewImpl) Create(ctx context.Context, request CreateCleanRoomAssetRequest) (*CleanRoomAsset, error) { +func (a *cleanRoomAssetsImpl) Create(ctx context.Context, request CreateCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets", request.CleanRoomName) queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *cleanRoomAssetsPreviewImpl) Create(ctx context.Context, request CreateC return &cleanRoomAsset, err } -func (a *cleanRoomAssetsPreviewImpl) Delete(ctx context.Context, request DeleteCleanRoomAssetRequest) error { +func (a *cleanRoomAssetsImpl) Delete(ctx context.Context, request DeleteCleanRoomAssetRequest) error { var deleteCleanRoomAssetResponse DeleteCleanRoomAssetResponse path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *cleanRoomAssetsPreviewImpl) Delete(ctx context.Context, request DeleteC return err } -func (a *cleanRoomAssetsPreviewImpl) Get(ctx context.Context, request GetCleanRoomAssetRequest) (*CleanRoomAsset, error) { +func (a *cleanRoomAssetsImpl) Get(ctx context.Context, request GetCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) queryParams := make(map[string]any) @@ -49,7 +49,7 @@ func (a *cleanRoomAssetsPreviewImpl) Get(ctx context.Context, request GetCleanRo } // List assets. -func (a *cleanRoomAssetsPreviewImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] { +func (a *cleanRoomAssetsImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] { getNextPage := func(ctx context.Context, req ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -74,11 +74,11 @@ func (a *cleanRoomAssetsPreviewImpl) List(ctx context.Context, request ListClean } // List assets. 
-func (a *cleanRoomAssetsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) { +func (a *cleanRoomAssetsImpl) ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) { iterator := a.List(ctx, request) return listing.ToSlice[CleanRoomAsset](ctx, iterator) } -func (a *cleanRoomAssetsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { +func (a *cleanRoomAssetsImpl) internalList(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { var listCleanRoomAssetsResponse ListCleanRoomAssetsResponse path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets", request.CleanRoomName) queryParams := make(map[string]any) @@ -88,7 +88,7 @@ func (a *cleanRoomAssetsPreviewImpl) internalList(ctx context.Context, request L return &listCleanRoomAssetsResponse, err } -func (a *cleanRoomAssetsPreviewImpl) Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) { +func (a *cleanRoomAssetsImpl) Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.Name) queryParams := make(map[string]any) @@ -99,15 +99,15 @@ func (a *cleanRoomAssetsPreviewImpl) Update(ctx context.Context, request UpdateC return &cleanRoomAsset, err } -// unexported type that holds implementations of just CleanRoomTaskRunsPreview API methods -type cleanRoomTaskRunsPreviewImpl struct { +// unexported type that holds implementations of just CleanRoomTaskRuns API methods +type cleanRoomTaskRunsImpl struct { client *client.DatabricksClient } // List notebook task runs. // // List all the historical notebook task runs in a clean room. -func (a *cleanRoomTaskRunsPreviewImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] { +func (a *cleanRoomTaskRunsImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] { getNextPage := func(ctx context.Context, req ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -134,11 +134,11 @@ func (a *cleanRoomTaskRunsPreviewImpl) List(ctx context.Context, request ListCle // List notebook task runs. // // List all the historical notebook task runs in a clean room. 
-func (a *cleanRoomTaskRunsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) { +func (a *cleanRoomTaskRunsImpl) ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) { iterator := a.List(ctx, request) return listing.ToSlice[CleanRoomNotebookTaskRun](ctx, iterator) } -func (a *cleanRoomTaskRunsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { +func (a *cleanRoomTaskRunsImpl) internalList(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { var listCleanRoomNotebookTaskRunsResponse ListCleanRoomNotebookTaskRunsResponse path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/runs", request.CleanRoomName) queryParams := make(map[string]any) @@ -148,12 +148,12 @@ func (a *cleanRoomTaskRunsPreviewImpl) internalList(ctx context.Context, request return &listCleanRoomNotebookTaskRunsResponse, err } -// unexported type that holds implementations of just CleanRoomsPreview API methods -type cleanRoomsPreviewImpl struct { +// unexported type that holds implementations of just CleanRooms API methods +type cleanRoomsImpl struct { client *client.DatabricksClient } -func (a *cleanRoomsPreviewImpl) Create(ctx context.Context, request CreateCleanRoomRequest) (*CleanRoom, error) { +func (a *cleanRoomsImpl) Create(ctx context.Context, request CreateCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := "/api/2.0preview/clean-rooms" queryParams := make(map[string]any) @@ -164,7 +164,7 @@ func (a *cleanRoomsPreviewImpl) Create(ctx context.Context, request CreateCleanR return &cleanRoom, err } -func (a *cleanRoomsPreviewImpl) CreateOutputCatalog(ctx context.Context, request CreateCleanRoomOutputCatalogRequest) (*CreateCleanRoomOutputCatalogResponse, error) { +func (a *cleanRoomsImpl) CreateOutputCatalog(ctx context.Context, request CreateCleanRoomOutputCatalogRequest) (*CreateCleanRoomOutputCatalogResponse, error) { var createCleanRoomOutputCatalogResponse CreateCleanRoomOutputCatalogResponse path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v/output-catalogs", request.CleanRoomName) queryParams := make(map[string]any) @@ -175,7 +175,7 @@ func (a *cleanRoomsPreviewImpl) CreateOutputCatalog(ctx context.Context, request return &createCleanRoomOutputCatalogResponse, err } -func (a *cleanRoomsPreviewImpl) Delete(ctx context.Context, request DeleteCleanRoomRequest) error { +func (a *cleanRoomsImpl) Delete(ctx context.Context, request DeleteCleanRoomRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) queryParams := make(map[string]any) @@ -185,7 +185,7 @@ func (a *cleanRoomsPreviewImpl) Delete(ctx context.Context, request DeleteCleanR return err } -func (a *cleanRoomsPreviewImpl) Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoom, error) { +func (a *cleanRoomsImpl) Get(ctx context.Context, request GetCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) queryParams := make(map[string]any) @@ -199,7 +199,7 @@ func (a *cleanRoomsPreviewImpl) Get(ctx context.Context, request GetCleanRoomReq // // Get a list of all clean rooms of the metastore. Only clean rooms the caller // has access to are returned. 
-func (a *cleanRoomsPreviewImpl) List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] { +func (a *cleanRoomsImpl) List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] { getNextPage := func(ctx context.Context, req ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -227,11 +227,11 @@ func (a *cleanRoomsPreviewImpl) List(ctx context.Context, request ListCleanRooms // // Get a list of all clean rooms of the metastore. Only clean rooms the caller // has access to are returned. -func (a *cleanRoomsPreviewImpl) ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) { +func (a *cleanRoomsImpl) ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) { iterator := a.List(ctx, request) return listing.ToSlice[CleanRoom](ctx, iterator) } -func (a *cleanRoomsPreviewImpl) internalList(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { +func (a *cleanRoomsImpl) internalList(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { var listCleanRoomsResponse ListCleanRoomsResponse path := "/api/2.0preview/clean-rooms" queryParams := make(map[string]any) @@ -241,7 +241,7 @@ func (a *cleanRoomsPreviewImpl) internalList(ctx context.Context, request ListCl return &listCleanRoomsResponse, err } -func (a *cleanRoomsPreviewImpl) Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) { +func (a *cleanRoomsImpl) Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := fmt.Sprintf("/api/2.0preview/clean-rooms/%v", request.Name) queryParams := make(map[string]any) diff --git a/compute/v2/impl.go b/compute/v2/impl.go index f80542fa9..2b4009cf8 100755 --- a/compute/v2/impl.go +++ b/compute/v2/impl.go @@ -208,7 +208,6 @@ func (a *clustersImpl) Events(ctx context.Context, request GetEvents) listing.It return nil } request = *resp.NextPage - return &request } iterator := listing.NewIterator( diff --git a/compute/v2preview/api.go b/compute/v2preview/api.go index f5d3fc7ba..c674f3d3e 100755 --- a/compute/v2preview/api.go +++ b/compute/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Cluster Policies Preview, Clusters Preview, Command Execution Preview, Global Init Scripts Preview, Instance Pools Preview, Instance Profiles Preview, Libraries Preview, Policy Compliance For Clusters Preview, Policy Families Preview, etc. +// These APIs allow you to manage Cluster Policies, Clusters, Command Execution, Global Init Scripts, Instance Pools, Instance Profiles, Libraries, Policy Compliance For Clusters, Policy Families, etc. package computepreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type ClusterPoliciesPreviewInterface interface { +type ClusterPoliciesInterface interface { // Create a new policy. // @@ -85,7 +85,7 @@ type ClusterPoliciesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) - // PolicyNameToPolicyIdMap calls [ClusterPoliciesPreviewAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. 
+ // PolicyNameToPolicyIdMap calls [ClusterPoliciesAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. // // Returns an error if there's more than one [Policy] with the same .Name. // @@ -94,7 +94,7 @@ type ClusterPoliciesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error) - // GetByName calls [ClusterPoliciesPreviewAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. + // GetByName calls [ClusterPoliciesAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. // // Returns an error if there's more than one [Policy] with the same .Name. // @@ -117,9 +117,9 @@ type ClusterPoliciesPreviewInterface interface { UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) } -func NewClusterPoliciesPreview(client *client.DatabricksClient) *ClusterPoliciesPreviewAPI { - return &ClusterPoliciesPreviewAPI{ - clusterPoliciesPreviewImpl: clusterPoliciesPreviewImpl{ +func NewClusterPolicies(client *client.DatabricksClient) *ClusterPoliciesAPI { + return &ClusterPoliciesAPI{ + clusterPoliciesImpl: clusterPoliciesImpl{ client: client, }, } @@ -148,16 +148,16 @@ func NewClusterPoliciesPreview(client *client.DatabricksClient) *ClusterPolicies // If no policies exist in the workspace, the Policy drop-down doesn't appear. // Only admin users can create, edit, and delete policies. Admin users also have // access to all policies. -type ClusterPoliciesPreviewAPI struct { - clusterPoliciesPreviewImpl +type ClusterPoliciesAPI struct { + clusterPoliciesImpl } // Delete a cluster policy. // // Delete a policy for a cluster. Clusters governed by this policy can still // run, but cannot be edited. -func (a *ClusterPoliciesPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { - return a.clusterPoliciesPreviewImpl.Delete(ctx, DeletePolicy{ +func (a *ClusterPoliciesAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.clusterPoliciesImpl.Delete(ctx, DeletePolicy{ PolicyId: policyId, }) } @@ -166,8 +166,8 @@ func (a *ClusterPoliciesPreviewAPI) DeleteByPolicyId(ctx context.Context, policy // // Get a cluster policy entity. Creation and editing is available to admins // only. -func (a *ClusterPoliciesPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*Policy, error) { - return a.clusterPoliciesPreviewImpl.Get(ctx, GetClusterPolicyRequest{ +func (a *ClusterPoliciesAPI) GetByPolicyId(ctx context.Context, policyId string) (*Policy, error) { + return a.clusterPoliciesImpl.Get(ctx, GetClusterPolicyRequest{ PolicyId: policyId, }) } @@ -175,8 +175,8 @@ func (a *ClusterPoliciesPreviewAPI) GetByPolicyId(ctx context.Context, policyId // Get cluster policy permission levels. // // Gets the permission levels that a user can have on an object. 
-func (a *ClusterPoliciesPreviewAPI) GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error) { - return a.clusterPoliciesPreviewImpl.GetPermissionLevels(ctx, GetClusterPolicyPermissionLevelsRequest{ +func (a *ClusterPoliciesAPI) GetPermissionLevelsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*GetClusterPolicyPermissionLevelsResponse, error) { + return a.clusterPoliciesImpl.GetPermissionLevels(ctx, GetClusterPolicyPermissionLevelsRequest{ ClusterPolicyId: clusterPolicyId, }) } @@ -185,20 +185,20 @@ func (a *ClusterPoliciesPreviewAPI) GetPermissionLevelsByClusterPolicyId(ctx con // // Gets the permissions of a cluster policy. Cluster policies can inherit // permissions from their root object. -func (a *ClusterPoliciesPreviewAPI) GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error) { - return a.clusterPoliciesPreviewImpl.GetPermissions(ctx, GetClusterPolicyPermissionsRequest{ +func (a *ClusterPoliciesAPI) GetPermissionsByClusterPolicyId(ctx context.Context, clusterPolicyId string) (*ClusterPolicyPermissions, error) { + return a.clusterPoliciesImpl.GetPermissions(ctx, GetClusterPolicyPermissionsRequest{ ClusterPolicyId: clusterPolicyId, }) } -// PolicyNameToPolicyIdMap calls [ClusterPoliciesPreviewAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. +// PolicyNameToPolicyIdMap calls [ClusterPoliciesAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. // // Returns an error if there's more than one [Policy] with the same .Name. // // Note: All [Policy] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ClusterPoliciesPreviewAPI) PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error) { +func (a *ClusterPoliciesAPI) PolicyNameToPolicyIdMap(ctx context.Context, request ListClusterPoliciesRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -216,14 +216,14 @@ func (a *ClusterPoliciesPreviewAPI) PolicyNameToPolicyIdMap(ctx context.Context, return mapping, nil } -// GetByName calls [ClusterPoliciesPreviewAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. +// GetByName calls [ClusterPoliciesAPI.PolicyNameToPolicyIdMap] and returns a single [Policy]. // // Returns an error if there's more than one [Policy] with the same .Name. // // Note: All [Policy] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ClusterPoliciesPreviewAPI) GetByName(ctx context.Context, name string) (*Policy, error) { +func (a *ClusterPoliciesAPI) GetByName(ctx context.Context, name string) (*Policy, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListClusterPoliciesRequest{}) if err != nil { @@ -244,7 +244,8 @@ func (a *ClusterPoliciesPreviewAPI) GetByName(ctx context.Context, name string) return &alternatives[0], nil } -type ClustersPreviewInterface interface { +type ClustersInterface interface { + clustersAPIUtilities // Change cluster owner. // @@ -373,7 +374,7 @@ type ClustersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. 
ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) - // ClusterDetailsClusterNameToClusterIdMap calls [ClustersPreviewAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. + // ClusterDetailsClusterNameToClusterIdMap calls [ClustersAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. // // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. // @@ -382,7 +383,7 @@ type ClustersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error) - // GetByClusterName calls [ClustersPreviewAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. + // GetByClusterName calls [ClustersAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. // // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. // @@ -521,9 +522,9 @@ type ClustersPreviewInterface interface { UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) } -func NewClustersPreview(client *client.DatabricksClient) *ClustersPreviewAPI { - return &ClustersPreviewAPI{ - clustersPreviewImpl: clustersPreviewImpl{ +func NewClusters(client *client.DatabricksClient) *ClustersAPI { + return &ClustersAPI{ + clustersImpl: clustersImpl{ client: client, }, } @@ -555,8 +556,8 @@ func NewClustersPreview(client *client.DatabricksClient) *ClustersPreviewAPI { // terminated clusters for 30 days. To keep an all-purpose cluster configuration // even after it has been terminated for more than 30 days, an administrator can // pin a cluster to the cluster list. -type ClustersPreviewAPI struct { - clustersPreviewImpl +type ClustersAPI struct { + clustersImpl } // Terminate cluster. @@ -565,8 +566,8 @@ type ClustersPreviewAPI struct { // asynchronously. Once the termination has completed, the cluster will be in a // `TERMINATED` state. If the cluster is already in a `TERMINATING` or // `TERMINATED` state, nothing will happen. -func (a *ClustersPreviewAPI) DeleteByClusterId(ctx context.Context, clusterId string) error { - return a.clustersPreviewImpl.Delete(ctx, DeleteCluster{ +func (a *ClustersAPI) DeleteByClusterId(ctx context.Context, clusterId string) error { + return a.clustersImpl.Delete(ctx, DeleteCluster{ ClusterId: clusterId, }) } @@ -575,8 +576,8 @@ func (a *ClustersPreviewAPI) DeleteByClusterId(ctx context.Context, clusterId st // // Retrieves the information for a cluster given its identifier. Clusters can be // described while they are running, or up to 60 days after they are terminated. -func (a *ClustersPreviewAPI) GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error) { - return a.clustersPreviewImpl.Get(ctx, GetClusterRequest{ +func (a *ClustersAPI) GetByClusterId(ctx context.Context, clusterId string) (*ClusterDetails, error) { + return a.clustersImpl.Get(ctx, GetClusterRequest{ ClusterId: clusterId, }) } @@ -584,8 +585,8 @@ func (a *ClustersPreviewAPI) GetByClusterId(ctx context.Context, clusterId strin // Get cluster permission levels. // // Gets the permission levels that a user can have on an object. 
-func (a *ClustersPreviewAPI) GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error) { - return a.clustersPreviewImpl.GetPermissionLevels(ctx, GetClusterPermissionLevelsRequest{ +func (a *ClustersAPI) GetPermissionLevelsByClusterId(ctx context.Context, clusterId string) (*GetClusterPermissionLevelsResponse, error) { + return a.clustersImpl.GetPermissionLevels(ctx, GetClusterPermissionLevelsRequest{ ClusterId: clusterId, }) } @@ -594,20 +595,20 @@ func (a *ClustersPreviewAPI) GetPermissionLevelsByClusterId(ctx context.Context, // // Gets the permissions of a cluster. Clusters can inherit permissions from // their root object. -func (a *ClustersPreviewAPI) GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error) { - return a.clustersPreviewImpl.GetPermissions(ctx, GetClusterPermissionsRequest{ +func (a *ClustersAPI) GetPermissionsByClusterId(ctx context.Context, clusterId string) (*ClusterPermissions, error) { + return a.clustersImpl.GetPermissions(ctx, GetClusterPermissionsRequest{ ClusterId: clusterId, }) } -// ClusterDetailsClusterNameToClusterIdMap calls [ClustersPreviewAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. +// ClusterDetailsClusterNameToClusterIdMap calls [ClustersAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. // // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. // // Note: All [ClusterDetails] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ClustersPreviewAPI) ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error) { +func (a *ClustersAPI) ClusterDetailsClusterNameToClusterIdMap(ctx context.Context, request ListClustersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -625,14 +626,14 @@ func (a *ClustersPreviewAPI) ClusterDetailsClusterNameToClusterIdMap(ctx context return mapping, nil } -// GetByClusterName calls [ClustersPreviewAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. +// GetByClusterName calls [ClustersAPI.ClusterDetailsClusterNameToClusterIdMap] and returns a single [ClusterDetails]. // // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. // // Note: All [ClusterDetails] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ClustersPreviewAPI) GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error) { +func (a *ClustersAPI) GetByClusterName(ctx context.Context, name string) (*ClusterDetails, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListClustersRequest{}) if err != nil { @@ -661,8 +662,8 @@ func (a *ClustersPreviewAPI) GetByClusterName(ctx context.Context, name string) // In addition, users will no longer see permanently deleted clusters in the // cluster list, and API users can no longer perform any action on permanently // deleted clusters. 
-func (a *ClustersPreviewAPI) PermanentDeleteByClusterId(ctx context.Context, clusterId string) error { - return a.clustersPreviewImpl.PermanentDelete(ctx, PermanentDeleteCluster{ +func (a *ClustersAPI) PermanentDeleteByClusterId(ctx context.Context, clusterId string) error { + return a.clustersImpl.PermanentDelete(ctx, PermanentDeleteCluster{ ClusterId: clusterId, }) } @@ -672,8 +673,8 @@ func (a *ClustersPreviewAPI) PermanentDeleteByClusterId(ctx context.Context, clu // Pinning a cluster ensures that the cluster will always be returned by the // ListClusters API. Pinning a cluster that is already pinned will have no // effect. This API can only be called by workspace admins. -func (a *ClustersPreviewAPI) PinByClusterId(ctx context.Context, clusterId string) error { - return a.clustersPreviewImpl.Pin(ctx, PinCluster{ +func (a *ClustersAPI) PinByClusterId(ctx context.Context, clusterId string) error { + return a.clustersImpl.Pin(ctx, PinCluster{ ClusterId: clusterId, }) } @@ -688,8 +689,8 @@ func (a *ClustersPreviewAPI) PinByClusterId(ctx context.Context, clusterId strin // autoscaling cluster, the current cluster starts with the minimum number of // nodes. * If the cluster is not currently in a `TERMINATED` state, nothing // will happen. * Clusters launched to run a job cannot be started. -func (a *ClustersPreviewAPI) StartByClusterId(ctx context.Context, clusterId string) error { - return a.clustersPreviewImpl.Start(ctx, StartCluster{ +func (a *ClustersAPI) StartByClusterId(ctx context.Context, clusterId string) error { + return a.clustersImpl.Start(ctx, StartCluster{ ClusterId: clusterId, }) } @@ -699,13 +700,14 @@ func (a *ClustersPreviewAPI) StartByClusterId(ctx context.Context, clusterId str // Unpinning a cluster will allow the cluster to eventually be removed from the // ListClusters API. Unpinning a cluster that is not pinned will have no effect. // This API can only be called by workspace admins. -func (a *ClustersPreviewAPI) UnpinByClusterId(ctx context.Context, clusterId string) error { - return a.clustersPreviewImpl.Unpin(ctx, UnpinCluster{ +func (a *ClustersAPI) UnpinByClusterId(ctx context.Context, clusterId string) error { + return a.clustersImpl.Unpin(ctx, UnpinCluster{ ClusterId: clusterId, }) } -type CommandExecutionPreviewInterface interface { +type CommandExecutionInterface interface { + commandExecutionAPIUtilities // Cancel a command. // @@ -749,9 +751,9 @@ type CommandExecutionPreviewInterface interface { Execute(ctx context.Context, request Command) (*Created, error) } -func NewCommandExecutionPreview(client *client.DatabricksClient) *CommandExecutionPreviewAPI { - return &CommandExecutionPreviewAPI{ - commandExecutionPreviewImpl: commandExecutionPreviewImpl{ +func NewCommandExecution(client *client.DatabricksClient) *CommandExecutionAPI { + return &CommandExecutionAPI{ + commandExecutionImpl: commandExecutionImpl{ client: client, }, } @@ -760,11 +762,11 @@ func NewCommandExecutionPreview(client *client.DatabricksClient) *CommandExecuti // This API allows execution of Python, Scala, SQL, or R commands on running // Databricks Clusters. This API only supports (classic) all-purpose clusters. // Serverless compute is not supported. -type CommandExecutionPreviewAPI struct { - commandExecutionPreviewImpl +type CommandExecutionAPI struct { + commandExecutionImpl } -type GlobalInitScriptsPreviewInterface interface { +type GlobalInitScriptsInterface interface { // Create init script. 
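The compute clients follow the same pattern. A hedged sketch of the cluster helpers renamed above, assuming the computepreview package from this patch's compute/v2preview/api.go plus the ctx and log setup from the earlier clean rooms sketch; "my-cluster" is a placeholder name:

clusters, err := computepreview.NewClustersClient(nil) // was NewClustersPreviewClient
if err != nil {
	log.Fatal(err)
}
// GetByClusterName loads all ClusterDetails into memory and errors if the
// name is ambiguous, per the generated doc comments above.
details, err := clusters.GetByClusterName(ctx, "my-cluster")
if err != nil {
	log.Fatal(err)
}
// Pinning keeps the cluster in ListClusters results; workspace admins only.
if err := clusters.PinByClusterId(ctx, details.ClusterId); err != nil {
	log.Fatal(err)
}

ClusterPoliciesClient, CommandExecutionClient, and the remaining compute services expose the same constructor-plus-embedded-interface shape, so call sites only need to drop the Preview infix.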
// @@ -811,7 +813,7 @@ type GlobalInitScriptsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) - // GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsPreviewAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. + // GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. // // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. // @@ -820,7 +822,7 @@ type GlobalInitScriptsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error) - // GetByName calls [GlobalInitScriptsPreviewAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. + // GetByName calls [GlobalInitScriptsAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. // // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. // @@ -836,9 +838,9 @@ type GlobalInitScriptsPreviewInterface interface { Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error } -func NewGlobalInitScriptsPreview(client *client.DatabricksClient) *GlobalInitScriptsPreviewAPI { - return &GlobalInitScriptsPreviewAPI{ - globalInitScriptsPreviewImpl: globalInitScriptsPreviewImpl{ +func NewGlobalInitScripts(client *client.DatabricksClient) *GlobalInitScriptsAPI { + return &GlobalInitScriptsAPI{ + globalInitScriptsImpl: globalInitScriptsImpl{ client: client, }, } @@ -854,15 +856,15 @@ func NewGlobalInitScriptsPreview(client *client.DatabricksClient) *GlobalInitScr // launch and init scripts with later position are skipped. If enough containers // fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` error // code. -type GlobalInitScriptsPreviewAPI struct { - globalInitScriptsPreviewImpl +type GlobalInitScriptsAPI struct { + globalInitScriptsImpl } // Delete init script. // // Deletes a global init script. -func (a *GlobalInitScriptsPreviewAPI) DeleteByScriptId(ctx context.Context, scriptId string) error { - return a.globalInitScriptsPreviewImpl.Delete(ctx, DeleteGlobalInitScriptRequest{ +func (a *GlobalInitScriptsAPI) DeleteByScriptId(ctx context.Context, scriptId string) error { + return a.globalInitScriptsImpl.Delete(ctx, DeleteGlobalInitScriptRequest{ ScriptId: scriptId, }) } @@ -870,20 +872,20 @@ func (a *GlobalInitScriptsPreviewAPI) DeleteByScriptId(ctx context.Context, scri // Get an init script. // // Gets all the details of a script, including its Base64-encoded contents. -func (a *GlobalInitScriptsPreviewAPI) GetByScriptId(ctx context.Context, scriptId string) (*GlobalInitScriptDetailsWithContent, error) { - return a.globalInitScriptsPreviewImpl.Get(ctx, GetGlobalInitScriptRequest{ +func (a *GlobalInitScriptsAPI) GetByScriptId(ctx context.Context, scriptId string) (*GlobalInitScriptDetailsWithContent, error) { + return a.globalInitScriptsImpl.Get(ctx, GetGlobalInitScriptRequest{ ScriptId: scriptId, }) } -// GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsPreviewAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. 
+// GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. // // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. // // Note: All [GlobalInitScriptDetails] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *GlobalInitScriptsPreviewAPI) GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error) { +func (a *GlobalInitScriptsAPI) GlobalInitScriptDetailsNameToScriptIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx) @@ -901,14 +903,14 @@ func (a *GlobalInitScriptsPreviewAPI) GlobalInitScriptDetailsNameToScriptIdMap(c return mapping, nil } -// GetByName calls [GlobalInitScriptsPreviewAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. +// GetByName calls [GlobalInitScriptsAPI.GlobalInitScriptDetailsNameToScriptIdMap] and returns a single [GlobalInitScriptDetails]. // // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. // // Note: All [GlobalInitScriptDetails] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *GlobalInitScriptsPreviewAPI) GetByName(ctx context.Context, name string) (*GlobalInitScriptDetails, error) { +func (a *GlobalInitScriptsAPI) GetByName(ctx context.Context, name string) (*GlobalInitScriptDetails, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -929,7 +931,7 @@ func (a *GlobalInitScriptsPreviewAPI) GetByName(ctx context.Context, name string return &alternatives[0], nil } -type InstancePoolsPreviewInterface interface { +type InstancePoolsInterface interface { // Create a new instance pool. // @@ -999,7 +1001,7 @@ type InstancePoolsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]InstancePoolAndStats, error) - // InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsPreviewAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. + // InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. // // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. // @@ -1008,7 +1010,7 @@ type InstancePoolsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error) - // GetByInstancePoolName calls [InstancePoolsPreviewAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. + // GetByInstancePoolName calls [InstancePoolsAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. // // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. 
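The name-lookup helpers renamed above change only their receiver types, not their semantics. A sketch of resolving a global init script by name and then fetching its full contents, reusing ctx from the earlier sketches; the script name is a placeholder:

scripts, err := computepreview.NewGlobalInitScriptsClient(nil)
if err != nil {
	log.Fatal(err)
}
// GetByName scans the ListAll results in memory, so it fails on duplicates.
meta, err := scripts.GetByName(ctx, "set-proxy")
if err != nil {
	log.Fatal(err)
}
// GetByScriptId returns GlobalInitScriptDetailsWithContent, i.e. the same
// metadata plus the Base64-encoded script body.
full, err := scripts.GetByScriptId(ctx, meta.ScriptId)
if err != nil {
	log.Fatal(err)
}
log.Printf("script: %+v", full)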
// @@ -1031,9 +1033,9 @@ type InstancePoolsPreviewInterface interface { UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) } -func NewInstancePoolsPreview(client *client.DatabricksClient) *InstancePoolsPreviewAPI { - return &InstancePoolsPreviewAPI{ - instancePoolsPreviewImpl: instancePoolsPreviewImpl{ +func NewInstancePools(client *client.DatabricksClient) *InstancePoolsAPI { + return &InstancePoolsAPI{ + instancePoolsImpl: instancePoolsImpl{ client: client, }, } @@ -1057,16 +1059,16 @@ func NewInstancePoolsPreview(client *client.DatabricksClient) *InstancePoolsPrev // // Databricks does not charge DBUs while instances are idle in the pool. // Instance provider billing does apply. See pricing. -type InstancePoolsPreviewAPI struct { - instancePoolsPreviewImpl +type InstancePoolsAPI struct { + instancePoolsImpl } // Delete an instance pool. // // Deletes the instance pool permanently. The idle instances in the pool are // terminated asynchronously. -func (a *InstancePoolsPreviewAPI) DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error { - return a.instancePoolsPreviewImpl.Delete(ctx, DeleteInstancePool{ +func (a *InstancePoolsAPI) DeleteByInstancePoolId(ctx context.Context, instancePoolId string) error { + return a.instancePoolsImpl.Delete(ctx, DeleteInstancePool{ InstancePoolId: instancePoolId, }) } @@ -1074,8 +1076,8 @@ func (a *InstancePoolsPreviewAPI) DeleteByInstancePoolId(ctx context.Context, in // Get instance pool information. // // Retrieve the information for an instance pool based on its identifier. -func (a *InstancePoolsPreviewAPI) GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error) { - return a.instancePoolsPreviewImpl.Get(ctx, GetInstancePoolRequest{ +func (a *InstancePoolsAPI) GetByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePool, error) { + return a.instancePoolsImpl.Get(ctx, GetInstancePoolRequest{ InstancePoolId: instancePoolId, }) } @@ -1083,8 +1085,8 @@ func (a *InstancePoolsPreviewAPI) GetByInstancePoolId(ctx context.Context, insta // Get instance pool permission levels. // // Gets the permission levels that a user can have on an object. -func (a *InstancePoolsPreviewAPI) GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error) { - return a.instancePoolsPreviewImpl.GetPermissionLevels(ctx, GetInstancePoolPermissionLevelsRequest{ +func (a *InstancePoolsAPI) GetPermissionLevelsByInstancePoolId(ctx context.Context, instancePoolId string) (*GetInstancePoolPermissionLevelsResponse, error) { + return a.instancePoolsImpl.GetPermissionLevels(ctx, GetInstancePoolPermissionLevelsRequest{ InstancePoolId: instancePoolId, }) } @@ -1093,20 +1095,20 @@ func (a *InstancePoolsPreviewAPI) GetPermissionLevelsByInstancePoolId(ctx contex // // Gets the permissions of an instance pool. Instance pools can inherit // permissions from their root object. 
-func (a *InstancePoolsPreviewAPI) GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error) { - return a.instancePoolsPreviewImpl.GetPermissions(ctx, GetInstancePoolPermissionsRequest{ +func (a *InstancePoolsAPI) GetPermissionsByInstancePoolId(ctx context.Context, instancePoolId string) (*InstancePoolPermissions, error) { + return a.instancePoolsImpl.GetPermissions(ctx, GetInstancePoolPermissionsRequest{ InstancePoolId: instancePoolId, }) } -// InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsPreviewAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. +// InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. // // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. // // Note: All [InstancePoolAndStats] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *InstancePoolsPreviewAPI) InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error) { +func (a *InstancePoolsAPI) InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx) @@ -1124,14 +1126,14 @@ func (a *InstancePoolsPreviewAPI) InstancePoolAndStatsInstancePoolNameToInstance return mapping, nil } -// GetByInstancePoolName calls [InstancePoolsPreviewAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. +// GetByInstancePoolName calls [InstancePoolsAPI.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap] and returns a single [InstancePoolAndStats]. // // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. // // Note: All [InstancePoolAndStats] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *InstancePoolsPreviewAPI) GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error) { +func (a *InstancePoolsAPI) GetByInstancePoolName(ctx context.Context, name string) (*InstancePoolAndStats, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -1152,7 +1154,7 @@ func (a *InstancePoolsPreviewAPI) GetByInstancePoolName(ctx context.Context, nam return &alternatives[0], nil } -type InstanceProfilesPreviewInterface interface { +type InstanceProfilesInterface interface { // Register an instance profile. 
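Instance pools get the equivalent treatment; a sketch of the renamed map helper, which, as documented above, loads every pool into memory before keying by name ("shared-pool" is a placeholder):

pools, err := computepreview.NewInstancePoolsClient(nil)
if err != nil {
	log.Fatal(err)
}
idByName, err := pools.InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap(ctx)
if err != nil {
	log.Fatal(err)
}
if id, ok := idByName["shared-pool"]; ok {
	pool, err := pools.GetByInstancePoolId(ctx, id)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pool: %+v", pool)
}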
// @@ -1214,9 +1216,9 @@ type InstanceProfilesPreviewInterface interface { RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error } -func NewInstanceProfilesPreview(client *client.DatabricksClient) *InstanceProfilesPreviewAPI { - return &InstanceProfilesPreviewAPI{ - instanceProfilesPreviewImpl: instanceProfilesPreviewImpl{ +func NewInstanceProfiles(client *client.DatabricksClient) *InstanceProfilesAPI { + return &InstanceProfilesAPI{ + instanceProfilesImpl: instanceProfilesImpl{ client: client, }, } @@ -1228,8 +1230,8 @@ func NewInstanceProfilesPreview(client *client.DatabricksClient) *InstanceProfil // instance profiles for more information. // // [Secure access to S3 buckets]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html -type InstanceProfilesPreviewAPI struct { - instanceProfilesPreviewImpl +type InstanceProfilesAPI struct { + instanceProfilesImpl } // Remove the instance profile. @@ -1238,13 +1240,14 @@ type InstanceProfilesPreviewAPI struct { // this instance profile will continue to function. // // This API is only accessible to admin users. -func (a *InstanceProfilesPreviewAPI) RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error { - return a.instanceProfilesPreviewImpl.Remove(ctx, RemoveInstanceProfile{ +func (a *InstanceProfilesAPI) RemoveByInstanceProfileArn(ctx context.Context, instanceProfileArn string) error { + return a.instanceProfilesImpl.Remove(ctx, RemoveInstanceProfile{ InstanceProfileArn: instanceProfileArn, }) } -type LibrariesPreviewInterface interface { +type LibrariesInterface interface { + librariesAPIUtilities // Get all statuses. // @@ -1313,9 +1316,9 @@ type LibrariesPreviewInterface interface { Uninstall(ctx context.Context, request UninstallLibraries) error } -func NewLibrariesPreview(client *client.DatabricksClient) *LibrariesPreviewAPI { - return &LibrariesPreviewAPI{ - librariesPreviewImpl: librariesPreviewImpl{ +func NewLibraries(client *client.DatabricksClient) *LibrariesAPI { + return &LibrariesAPI{ + librariesImpl: librariesImpl{ client: client, }, } @@ -1337,8 +1340,8 @@ func NewLibrariesPreview(client *client.DatabricksClient) *LibrariesPreviewAPI { // When you uninstall a library from a cluster, the library is removed only when // you restart the cluster. Until you restart the cluster, the status of the // uninstalled library appears as Uninstall pending restart. -type LibrariesPreviewAPI struct { - librariesPreviewImpl +type LibrariesAPI struct { + librariesImpl } // Get status. @@ -1350,13 +1353,13 @@ type LibrariesPreviewAPI struct { // are returned first. 2. Libraries that were previously requested to be // installed on this cluster, but are now marked for removal, in no // particular order, are returned last. -func (a *LibrariesPreviewAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) { - return a.librariesPreviewImpl.internalClusterStatus(ctx, ClusterStatus{ +func (a *LibrariesAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) { + return a.librariesImpl.internalClusterStatus(ctx, ClusterStatus{ ClusterId: clusterId, }) } -type PolicyComplianceForClustersPreviewInterface interface { +type PolicyComplianceForClustersInterface interface { // Enforce cluster policy compliance.
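Finally, a sketch of the library status helper renamed above; the cluster ID is a placeholder:

libraries, err := computepreview.NewLibrariesClient(nil)
if err != nil {
	log.Fatal(err)
}
// ClusterStatusByClusterId wraps internalClusterStatus with only the required
// cluster ID; results follow the install/removal ordering described above.
statuses, err := libraries.ClusterStatusByClusterId(ctx, "0211-123456-abcdef12")
if err != nil {
	log.Fatal(err)
}
log.Printf("library statuses: %+v", statuses)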
// @@ -1406,9 +1409,9 @@ type PolicyComplianceForClustersPreviewInterface interface { ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) } -func NewPolicyComplianceForClustersPreview(client *client.DatabricksClient) *PolicyComplianceForClustersPreviewAPI { - return &PolicyComplianceForClustersPreviewAPI{ - policyComplianceForClustersPreviewImpl: policyComplianceForClustersPreviewImpl{ +func NewPolicyComplianceForClusters(client *client.DatabricksClient) *PolicyComplianceForClustersAPI { + return &PolicyComplianceForClustersAPI{ + policyComplianceForClustersImpl: policyComplianceForClustersImpl{ client: client, }, } @@ -1424,21 +1427,21 @@ func NewPolicyComplianceForClustersPreview(client *client.DatabricksClient) *Pol // The get and list compliance APIs allow you to view the policy compliance // status of a cluster. The enforce compliance API allows you to update a // cluster to be compliant with the current version of its policy. -type PolicyComplianceForClustersPreviewAPI struct { - policyComplianceForClustersPreviewImpl +type PolicyComplianceForClustersAPI struct { + policyComplianceForClustersImpl } // Get cluster policy compliance. // // Returns the policy compliance status of a cluster. Clusters could be out of // compliance if their policy was updated after the cluster was last edited. -func (a *PolicyComplianceForClustersPreviewAPI) GetComplianceByClusterId(ctx context.Context, clusterId string) (*GetClusterComplianceResponse, error) { - return a.policyComplianceForClustersPreviewImpl.GetCompliance(ctx, GetClusterComplianceRequest{ +func (a *PolicyComplianceForClustersAPI) GetComplianceByClusterId(ctx context.Context, clusterId string) (*GetClusterComplianceResponse, error) { + return a.policyComplianceForClustersImpl.GetCompliance(ctx, GetClusterComplianceRequest{ ClusterId: clusterId, }) } -type PolicyFamiliesPreviewInterface interface { +type PolicyFamiliesInterface interface { // Get policy family information. // @@ -1469,9 +1472,9 @@ type PolicyFamiliesPreviewInterface interface { ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) } -func NewPolicyFamiliesPreview(client *client.DatabricksClient) *PolicyFamiliesPreviewAPI { - return &PolicyFamiliesPreviewAPI{ - policyFamiliesPreviewImpl: policyFamiliesPreviewImpl{ +func NewPolicyFamilies(client *client.DatabricksClient) *PolicyFamiliesAPI { + return &PolicyFamiliesAPI{ + policyFamiliesImpl: policyFamiliesImpl{ client: client, }, } @@ -1486,16 +1489,16 @@ func NewPolicyFamiliesPreview(client *client.DatabricksClient) *PolicyFamiliesPr // Policy families cannot be used directly to create clusters. Instead, you // create cluster policies using a policy family. Cluster policies created using // a policy family inherit the policy family's policy definition. -type PolicyFamiliesPreviewAPI struct { - policyFamiliesPreviewImpl +type PolicyFamiliesAPI struct { + policyFamiliesImpl } // Get policy family information. 
// // Retrieve the information for a policy family based on its identifier and // version -func (a *PolicyFamiliesPreviewAPI) GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error) { - return a.policyFamiliesPreviewImpl.Get(ctx, GetPolicyFamilyRequest{ +func (a *PolicyFamiliesAPI) GetByPolicyFamilyId(ctx context.Context, policyFamilyId string) (*PolicyFamily, error) { + return a.policyFamiliesImpl.Get(ctx, GetPolicyFamilyRequest{ PolicyFamilyId: policyFamilyId, }) } diff --git a/compute/v2preview/client.go b/compute/v2preview/client.go index 77a716429..48a4f822a 100755 --- a/compute/v2preview/client.go +++ b/compute/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type ClusterPoliciesPreviewClient struct { - ClusterPoliciesPreviewInterface +type ClusterPoliciesClient struct { + ClusterPoliciesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewClusterPoliciesPreviewClient(cfg *config.Config) (*ClusterPoliciesPreviewClient, error) { +func NewClusterPoliciesClient(cfg *config.Config) (*ClusterPoliciesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewClusterPoliciesPreviewClient(cfg *config.Config) (*ClusterPoliciesPrevie return nil, err } - return &ClusterPoliciesPreviewClient{ - Config: cfg, - apiClient: apiClient, - ClusterPoliciesPreviewInterface: NewClusterPoliciesPreview(databricksClient), + return &ClusterPoliciesClient{ + Config: cfg, + apiClient: apiClient, + ClusterPoliciesInterface: NewClusterPolicies(databricksClient), }, nil } -type ClustersPreviewClient struct { - ClustersPreviewInterface +type ClustersClient struct { + ClustersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewClustersPreviewClient(cfg *config.Config) (*ClustersPreviewClient, error) { +func NewClustersClient(cfg *config.Config) (*ClustersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewClustersPreviewClient(cfg *config.Config) (*ClustersPreviewClient, error return nil, err } - return &ClustersPreviewClient{ - Config: cfg, - apiClient: apiClient, - ClustersPreviewInterface: NewClustersPreview(databricksClient), + return &ClustersClient{ + Config: cfg, + apiClient: apiClient, + ClustersInterface: NewClusters(databricksClient), }, nil } -type CommandExecutionPreviewClient struct { - CommandExecutionPreviewInterface +type CommandExecutionClient struct { + CommandExecutionInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCommandExecutionPreviewClient(cfg *config.Config) (*CommandExecutionPreviewClient, error) { +func NewCommandExecutionClient(cfg *config.Config) (*CommandExecutionClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewCommandExecutionPreviewClient(cfg *config.Config) (*CommandExecutionPrev return nil, err } - return &CommandExecutionPreviewClient{ - Config: cfg, - apiClient: apiClient, - CommandExecutionPreviewInterface: NewCommandExecutionPreview(databricksClient), + return &CommandExecutionClient{ + Config: cfg, + apiClient: apiClient, + CommandExecutionInterface: NewCommandExecution(databricksClient), }, nil } -type GlobalInitScriptsPreviewClient struct { - GlobalInitScriptsPreviewInterface +type GlobalInitScriptsClient struct { + GlobalInitScriptsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewGlobalInitScriptsPreviewClient(cfg *config.Config) (*GlobalInitScriptsPreviewClient, error) { +func
NewGlobalInitScriptsClient(cfg *config.Config) (*GlobalInitScriptsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,20 +139,20 @@ func NewGlobalInitScriptsPreviewClient(cfg *config.Config) (*GlobalInitScriptsPr return nil, err } - return &GlobalInitScriptsPreviewClient{ - Config: cfg, - apiClient: apiClient, - GlobalInitScriptsPreviewInterface: NewGlobalInitScriptsPreview(databricksClient), + return &GlobalInitScriptsClient{ + Config: cfg, + apiClient: apiClient, + GlobalInitScriptsInterface: NewGlobalInitScripts(databricksClient), }, nil } -type InstancePoolsPreviewClient struct { - InstancePoolsPreviewInterface +type InstancePoolsClient struct { + InstancePoolsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewInstancePoolsPreviewClient(cfg *config.Config) (*InstancePoolsPreviewClient, error) { +func NewInstancePoolsClient(cfg *config.Config) (*InstancePoolsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -173,20 +173,20 @@ func NewInstancePoolsPreviewClient(cfg *config.Config) (*InstancePoolsPreviewCli return nil, err } - return &InstancePoolsPreviewClient{ - Config: cfg, - apiClient: apiClient, - InstancePoolsPreviewInterface: NewInstancePoolsPreview(databricksClient), + return &InstancePoolsClient{ + Config: cfg, + apiClient: apiClient, + InstancePoolsInterface: NewInstancePools(databricksClient), }, nil } -type InstanceProfilesPreviewClient struct { - InstanceProfilesPreviewInterface +type InstanceProfilesClient struct { + InstanceProfilesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewInstanceProfilesPreviewClient(cfg *config.Config) (*InstanceProfilesPreviewClient, error) { +func NewInstanceProfilesClient(cfg *config.Config) (*InstanceProfilesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -207,20 +207,20 @@ func NewInstanceProfilesPreviewClient(cfg *config.Config) (*InstanceProfilesPrev return nil, err } - return &InstanceProfilesPreviewClient{ - Config: cfg, - apiClient: apiClient, - InstanceProfilesPreviewInterface: NewInstanceProfilesPreview(databricksClient), + return &InstanceProfilesClient{ + Config: cfg, + apiClient: apiClient, + InstanceProfilesInterface: NewInstanceProfiles(databricksClient), }, nil } -type LibrariesPreviewClient struct { - LibrariesPreviewInterface +type LibrariesClient struct { + LibrariesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewLibrariesPreviewClient(cfg *config.Config) (*LibrariesPreviewClient, error) { +func NewLibrariesClient(cfg *config.Config) (*LibrariesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -241,20 +241,20 @@ func NewLibrariesPreviewClient(cfg *config.Config) (*LibrariesPreviewClient, err return nil, err } - return &LibrariesPreviewClient{ - Config: cfg, - apiClient: apiClient, - LibrariesPreviewInterface: NewLibrariesPreview(databricksClient), + return &LibrariesClient{ + Config: cfg, + apiClient: apiClient, + LibrariesInterface: NewLibraries(databricksClient), }, nil } -type PolicyComplianceForClustersPreviewClient struct { - PolicyComplianceForClustersPreviewInterface +type PolicyComplianceForClustersClient struct { + PolicyComplianceForClustersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPolicyComplianceForClustersPreviewClient(cfg *config.Config) (*PolicyComplianceForClustersPreviewClient, error) { +func NewPolicyComplianceForClustersClient(cfg *config.Config) (*PolicyComplianceForClustersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -275,20 +275,20 
@@ func NewPolicyComplianceForClustersPreviewClient(cfg *config.Config) (*PolicyCom return nil, err } - return &PolicyComplianceForClustersPreviewClient{ - Config: cfg, - apiClient: apiClient, - PolicyComplianceForClustersPreviewInterface: NewPolicyComplianceForClustersPreview(databricksClient), + return &PolicyComplianceForClustersClient{ + Config: cfg, + apiClient: apiClient, + PolicyComplianceForClustersInterface: NewPolicyComplianceForClusters(databricksClient), }, nil } -type PolicyFamiliesPreviewClient struct { - PolicyFamiliesPreviewInterface +type PolicyFamiliesClient struct { + PolicyFamiliesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPolicyFamiliesPreviewClient(cfg *config.Config) (*PolicyFamiliesPreviewClient, error) { +func NewPolicyFamiliesClient(cfg *config.Config) (*PolicyFamiliesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -309,9 +309,9 @@ func NewPolicyFamiliesPreviewClient(cfg *config.Config) (*PolicyFamiliesPreviewC return nil, err } - return &PolicyFamiliesPreviewClient{ - Config: cfg, - apiClient: apiClient, - PolicyFamiliesPreviewInterface: NewPolicyFamiliesPreview(databricksClient), + return &PolicyFamiliesClient{ + Config: cfg, + apiClient: apiClient, + PolicyFamiliesInterface: NewPolicyFamilies(databricksClient), }, nil } diff --git a/compute/v2preview/impl.go b/compute/v2preview/impl.go index 5855c50d2..896ffea7f 100755 --- a/compute/v2preview/impl.go +++ b/compute/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just ClusterPoliciesPreview API methods -type clusterPoliciesPreviewImpl struct { +// unexported type that holds implementations of just ClusterPolicies API methods +type clusterPoliciesImpl struct { client *client.DatabricksClient } -func (a *clusterPoliciesPreviewImpl) Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error) { +func (a *clusterPoliciesImpl) Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error) { var createPolicyResponse CreatePolicyResponse path := "/api/2.0preview/policies/clusters/create" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *clusterPoliciesPreviewImpl) Create(ctx context.Context, request CreateP return &createPolicyResponse, err } -func (a *clusterPoliciesPreviewImpl) Delete(ctx context.Context, request DeletePolicy) error { +func (a *clusterPoliciesImpl) Delete(ctx context.Context, request DeletePolicy) error { var deletePolicyResponse DeletePolicyResponse path := "/api/2.0preview/policies/clusters/delete" queryParams := make(map[string]any) @@ -39,7 +39,7 @@ func (a *clusterPoliciesPreviewImpl) Delete(ctx context.Context, request DeleteP return err } -func (a *clusterPoliciesPreviewImpl) Edit(ctx context.Context, request EditPolicy) error { +func (a *clusterPoliciesImpl) Edit(ctx context.Context, request EditPolicy) error { var editPolicyResponse EditPolicyResponse path := "/api/2.0preview/policies/clusters/edit" queryParams := make(map[string]any) @@ -50,7 +50,7 @@ func (a *clusterPoliciesPreviewImpl) Edit(ctx context.Context, request EditPolic return err } -func (a *clusterPoliciesPreviewImpl) Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error) { +func (a *clusterPoliciesImpl) Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error) { var policy Policy path := "/api/2.0preview/policies/clusters/get" queryParams := make(map[string]any) @@ -60,7 +60,7 @@ func 
(a *clusterPoliciesPreviewImpl) Get(ctx context.Context, request GetCluster return &policy, err } -func (a *clusterPoliciesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error) { +func (a *clusterPoliciesImpl) GetPermissionLevels(ctx context.Context, request GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error) { var getClusterPolicyPermissionLevelsResponse GetClusterPolicyPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v/permissionLevels", request.ClusterPolicyId) queryParams := make(map[string]any) @@ -70,7 +70,7 @@ func (a *clusterPoliciesPreviewImpl) GetPermissionLevels(ctx context.Context, re return &getClusterPolicyPermissionLevelsResponse, err } -func (a *clusterPoliciesPreviewImpl) GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { +func (a *clusterPoliciesImpl) GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) queryParams := make(map[string]any) @@ -83,7 +83,7 @@ func (a *clusterPoliciesPreviewImpl) GetPermissions(ctx context.Context, request // List cluster policies. // // Returns a list of policies accessible by the requesting user. -func (a *clusterPoliciesPreviewImpl) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] { +func (a *clusterPoliciesImpl) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] { getNextPage := func(ctx context.Context, req ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -104,11 +104,11 @@ func (a *clusterPoliciesPreviewImpl) List(ctx context.Context, request ListClust // List cluster policies. // // Returns a list of policies accessible by the requesting user. 
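Reviewer note: every service in this patch follows the same constructor-plus-iterator shape, so one end-to-end sketch may help. The import path, the compute alias, and the printed Policy fields are my assumptions, not something this diff guarantees:

package main

import (
	"context"
	"fmt"

	// Assumed import path for the package this patch adds.
	compute "github.com/databricks/databricks-sdk-go/compute/v2preview"
)

func main() {
	ctx := context.Background()
	// A nil config is accepted: the constructor falls back to &config.Config{}
	// and the SDK's usual environment/profile resolution.
	policies, err := compute.NewClusterPoliciesClient(nil)
	if err != nil {
		panic(err)
	}
	// ListAll drains the listing.Iterator[Policy] that List returns.
	all, err := policies.ListAll(ctx, compute.ListClusterPoliciesRequest{})
	if err != nil {
		panic(err)
	}
	for _, p := range all {
		fmt.Println(p.PolicyId, p.Name) // field names assumed from the main SDK's model
	}
}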
-func (a *clusterPoliciesPreviewImpl) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) { +func (a *clusterPoliciesImpl) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) { iterator := a.List(ctx, request) return listing.ToSlice[Policy](ctx, iterator) } -func (a *clusterPoliciesPreviewImpl) internalList(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { +func (a *clusterPoliciesImpl) internalList(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { var listPoliciesResponse ListPoliciesResponse path := "/api/2.0preview/policies/clusters/list" queryParams := make(map[string]any) @@ -118,7 +118,7 @@ func (a *clusterPoliciesPreviewImpl) internalList(ctx context.Context, request L return &listPoliciesResponse, err } -func (a *clusterPoliciesPreviewImpl) SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { +func (a *clusterPoliciesImpl) SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) queryParams := make(map[string]any) @@ -129,7 +129,7 @@ func (a *clusterPoliciesPreviewImpl) SetPermissions(ctx context.Context, request return &clusterPolicyPermissions, err } -func (a *clusterPoliciesPreviewImpl) UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { +func (a *clusterPoliciesImpl) UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0preview/permissions/cluster-policies/%v", request.ClusterPolicyId) queryParams := make(map[string]any) @@ -140,12 +140,12 @@ func (a *clusterPoliciesPreviewImpl) UpdatePermissions(ctx context.Context, requ return &clusterPolicyPermissions, err } -// unexported type that holds implementations of just ClustersPreview API methods -type clustersPreviewImpl struct { +// unexported type that holds implementations of just Clusters API methods +type clustersImpl struct { client *client.DatabricksClient } -func (a *clustersPreviewImpl) ChangeOwner(ctx context.Context, request ChangeClusterOwner) error { +func (a *clustersImpl) ChangeOwner(ctx context.Context, request ChangeClusterOwner) error { var changeClusterOwnerResponse ChangeClusterOwnerResponse path := "/api/2.1preview/clusters/change-owner" queryParams := make(map[string]any) @@ -156,7 +156,7 @@ func (a *clustersPreviewImpl) ChangeOwner(ctx context.Context, request ChangeClu return err } -func (a *clustersPreviewImpl) Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) { +func (a *clustersImpl) Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) { var createClusterResponse CreateClusterResponse path := "/api/2.1preview/clusters/create" queryParams := make(map[string]any) @@ -167,7 +167,7 @@ func (a *clustersPreviewImpl) Create(ctx context.Context, request CreateCluster) return &createClusterResponse, err } -func (a *clustersPreviewImpl) Delete(ctx context.Context, request DeleteCluster) error { +func (a *clustersImpl) Delete(ctx context.Context, request DeleteCluster) error { var deleteClusterResponse DeleteClusterResponse path := 
"/api/2.1preview/clusters/delete" queryParams := make(map[string]any) @@ -178,7 +178,7 @@ func (a *clustersPreviewImpl) Delete(ctx context.Context, request DeleteCluster) return err } -func (a *clustersPreviewImpl) Edit(ctx context.Context, request EditCluster) error { +func (a *clustersImpl) Edit(ctx context.Context, request EditCluster) error { var editClusterResponse EditClusterResponse path := "/api/2.1preview/clusters/edit" queryParams := make(map[string]any) @@ -194,7 +194,7 @@ func (a *clustersPreviewImpl) Edit(ctx context.Context, request EditCluster) err // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all the // nparameters necessary to request the next page of events. -func (a *clustersPreviewImpl) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] { +func (a *clustersImpl) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] { getNextPage := func(ctx context.Context, req GetEvents) (*GetEventsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -204,10 +204,11 @@ func (a *clustersPreviewImpl) Events(ctx context.Context, request GetEvents) lis return resp.Events } getNextReq := func(resp *GetEventsResponse) *GetEvents { - if len(getItems(resp)) == 0 { + if resp.NextPage == nil { return nil } - request.Offset = resp.Offset + int64(len(resp.Events)) + request = *resp.NextPage + return &request } iterator := listing.NewIterator( @@ -223,12 +224,12 @@ func (a *clustersPreviewImpl) Events(ctx context.Context, request GetEvents) lis // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all the // nparameters necessary to request the next page of events. 
-func (a *clustersPreviewImpl) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) { +func (a *clustersImpl) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) { iterator := a.Events(ctx, request) return listing.ToSliceN[ClusterEvent, int64](ctx, iterator, request.Limit) } -func (a *clustersPreviewImpl) internalEvents(ctx context.Context, request GetEvents) (*GetEventsResponse, error) { +func (a *clustersImpl) internalEvents(ctx context.Context, request GetEvents) (*GetEventsResponse, error) { var getEventsResponse GetEventsResponse path := "/api/2.1preview/clusters/events" queryParams := make(map[string]any) @@ -239,7 +240,7 @@ func (a *clustersPreviewImpl) internalEvents(ctx context.Context, request GetEve return &getEventsResponse, err } -func (a *clustersPreviewImpl) Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error) { +func (a *clustersImpl) Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error) { var clusterDetails ClusterDetails path := "/api/2.1preview/clusters/get" queryParams := make(map[string]any) @@ -249,7 +250,7 @@ func (a *clustersPreviewImpl) Get(ctx context.Context, request GetClusterRequest return &clusterDetails, err } -func (a *clustersPreviewImpl) GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error) { +func (a *clustersImpl) GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error) { var getClusterPermissionLevelsResponse GetClusterPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v/permissionLevels", request.ClusterId) queryParams := make(map[string]any) @@ -259,7 +260,7 @@ func (a *clustersPreviewImpl) GetPermissionLevels(ctx context.Context, request G return &getClusterPermissionLevelsResponse, err } -func (a *clustersPreviewImpl) GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error) { +func (a *clustersImpl) GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) queryParams := make(map[string]any) @@ -274,7 +275,7 @@ func (a *clustersPreviewImpl) GetPermissions(ctx context.Context, request GetClu // Return information about all pinned and active clusters, and all clusters // terminated within the last 30 days. Clusters terminated prior to this period // are not included. -func (a *clustersPreviewImpl) List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] { +func (a *clustersImpl) List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] { getNextPage := func(ctx context.Context, req ListClustersRequest) (*ListClustersResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -303,12 +304,12 @@ func (a *clustersPreviewImpl) List(ctx context.Context, request ListClustersRequ // Return information about all pinned and active clusters, and all clusters // terminated within the last 30 days. Clusters terminated prior to this period // are not included. 
-func (a *clustersPreviewImpl) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { +func (a *clustersImpl) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { iterator := a.List(ctx, request) return listing.ToSliceN[ClusterDetails, int](ctx, iterator, request.PageSize) } -func (a *clustersPreviewImpl) internalList(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { +func (a *clustersImpl) internalList(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { var listClustersResponse ListClustersResponse path := "/api/2.1preview/clusters/list" queryParams := make(map[string]any) @@ -318,7 +319,7 @@ func (a *clustersPreviewImpl) internalList(ctx context.Context, request ListClus return &listClustersResponse, err } -func (a *clustersPreviewImpl) ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error) { +func (a *clustersImpl) ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error) { var listNodeTypesResponse ListNodeTypesResponse path := "/api/2.1preview/clusters/list-node-types" @@ -328,7 +329,7 @@ func (a *clustersPreviewImpl) ListNodeTypes(ctx context.Context) (*ListNodeTypes return &listNodeTypesResponse, err } -func (a *clustersPreviewImpl) ListZones(ctx context.Context) (*ListAvailableZonesResponse, error) { +func (a *clustersImpl) ListZones(ctx context.Context) (*ListAvailableZonesResponse, error) { var listAvailableZonesResponse ListAvailableZonesResponse path := "/api/2.1preview/clusters/list-zones" @@ -338,7 +339,7 @@ func (a *clustersPreviewImpl) ListZones(ctx context.Context) (*ListAvailableZone return &listAvailableZonesResponse, err } -func (a *clustersPreviewImpl) PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error { +func (a *clustersImpl) PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error { var permanentDeleteClusterResponse PermanentDeleteClusterResponse path := "/api/2.1preview/clusters/permanent-delete" queryParams := make(map[string]any) @@ -349,7 +350,7 @@ func (a *clustersPreviewImpl) PermanentDelete(ctx context.Context, request Perma return err } -func (a *clustersPreviewImpl) Pin(ctx context.Context, request PinCluster) error { +func (a *clustersImpl) Pin(ctx context.Context, request PinCluster) error { var pinClusterResponse PinClusterResponse path := "/api/2.1preview/clusters/pin" queryParams := make(map[string]any) @@ -360,7 +361,7 @@ func (a *clustersPreviewImpl) Pin(ctx context.Context, request PinCluster) error return err } -func (a *clustersPreviewImpl) Resize(ctx context.Context, request ResizeCluster) error { +func (a *clustersImpl) Resize(ctx context.Context, request ResizeCluster) error { var resizeClusterResponse ResizeClusterResponse path := "/api/2.1preview/clusters/resize" queryParams := make(map[string]any) @@ -371,7 +372,7 @@ func (a *clustersPreviewImpl) Resize(ctx context.Context, request ResizeCluster) return err } -func (a *clustersPreviewImpl) Restart(ctx context.Context, request RestartCluster) error { +func (a *clustersImpl) Restart(ctx context.Context, request RestartCluster) error { var restartClusterResponse RestartClusterResponse path := "/api/2.1preview/clusters/restart" queryParams := make(map[string]any) @@ -382,7 +383,7 @@ func (a *clustersPreviewImpl) Restart(ctx context.Context, request RestartCluste return err } -func (a *clustersPreviewImpl) SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { 
+func (a *clustersImpl) SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) queryParams := make(map[string]any) @@ -393,7 +394,7 @@ func (a *clustersPreviewImpl) SetPermissions(ctx context.Context, request Cluste return &clusterPermissions, err } -func (a *clustersPreviewImpl) SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error) { +func (a *clustersImpl) SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error) { var getSparkVersionsResponse GetSparkVersionsResponse path := "/api/2.1preview/clusters/spark-versions" @@ -403,7 +404,7 @@ func (a *clustersPreviewImpl) SparkVersions(ctx context.Context) (*GetSparkVersi return &getSparkVersionsResponse, err } -func (a *clustersPreviewImpl) Start(ctx context.Context, request StartCluster) error { +func (a *clustersImpl) Start(ctx context.Context, request StartCluster) error { var startClusterResponse StartClusterResponse path := "/api/2.1preview/clusters/start" queryParams := make(map[string]any) @@ -414,7 +415,7 @@ func (a *clustersPreviewImpl) Start(ctx context.Context, request StartCluster) e return err } -func (a *clustersPreviewImpl) Unpin(ctx context.Context, request UnpinCluster) error { +func (a *clustersImpl) Unpin(ctx context.Context, request UnpinCluster) error { var unpinClusterResponse UnpinClusterResponse path := "/api/2.1preview/clusters/unpin" queryParams := make(map[string]any) @@ -425,7 +426,7 @@ func (a *clustersPreviewImpl) Unpin(ctx context.Context, request UnpinCluster) e return err } -func (a *clustersPreviewImpl) Update(ctx context.Context, request UpdateCluster) error { +func (a *clustersImpl) Update(ctx context.Context, request UpdateCluster) error { var updateClusterResponse UpdateClusterResponse path := "/api/2.1preview/clusters/update" queryParams := make(map[string]any) @@ -436,7 +437,7 @@ func (a *clustersPreviewImpl) Update(ctx context.Context, request UpdateCluster) return err } -func (a *clustersPreviewImpl) UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { +func (a *clustersImpl) UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0preview/permissions/clusters/%v", request.ClusterId) queryParams := make(map[string]any) @@ -447,12 +448,12 @@ func (a *clustersPreviewImpl) UpdatePermissions(ctx context.Context, request Clu return &clusterPermissions, err } -// unexported type that holds implementations of just CommandExecutionPreview API methods -type commandExecutionPreviewImpl struct { +// unexported type that holds implementations of just CommandExecution API methods +type commandExecutionImpl struct { client *client.DatabricksClient } -func (a *commandExecutionPreviewImpl) Cancel(ctx context.Context, request CancelCommand) error { +func (a *commandExecutionImpl) Cancel(ctx context.Context, request CancelCommand) error { var cancelResponse CancelResponse path := "/api/1.2preview/commands/cancel" queryParams := make(map[string]any) @@ -463,7 +464,7 @@ func (a *commandExecutionPreviewImpl) Cancel(ctx context.Context, request Cancel return err } -func (a *commandExecutionPreviewImpl) CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error) { +func (a *commandExecutionImpl) CommandStatus(ctx 
context.Context, request CommandStatusRequest) (*CommandStatusResponse, error) { var commandStatusResponse CommandStatusResponse path := "/api/1.2preview/commands/status" queryParams := make(map[string]any) @@ -473,7 +474,7 @@ func (a *commandExecutionPreviewImpl) CommandStatus(ctx context.Context, request return &commandStatusResponse, err } -func (a *commandExecutionPreviewImpl) ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error) { +func (a *commandExecutionImpl) ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error) { var contextStatusResponse ContextStatusResponse path := "/api/1.2preview/contexts/status" queryParams := make(map[string]any) @@ -483,7 +484,7 @@ func (a *commandExecutionPreviewImpl) ContextStatus(ctx context.Context, request return &contextStatusResponse, err } -func (a *commandExecutionPreviewImpl) Create(ctx context.Context, request CreateContext) (*Created, error) { +func (a *commandExecutionImpl) Create(ctx context.Context, request CreateContext) (*Created, error) { var created Created path := "/api/1.2preview/contexts/create" queryParams := make(map[string]any) @@ -494,7 +495,7 @@ func (a *commandExecutionPreviewImpl) Create(ctx context.Context, request Create return &created, err } -func (a *commandExecutionPreviewImpl) Destroy(ctx context.Context, request DestroyContext) error { +func (a *commandExecutionImpl) Destroy(ctx context.Context, request DestroyContext) error { var destroyResponse DestroyResponse path := "/api/1.2preview/contexts/destroy" queryParams := make(map[string]any) @@ -505,7 +506,7 @@ func (a *commandExecutionPreviewImpl) Destroy(ctx context.Context, request Destr return err } -func (a *commandExecutionPreviewImpl) Execute(ctx context.Context, request Command) (*Created, error) { +func (a *commandExecutionImpl) Execute(ctx context.Context, request Command) (*Created, error) { var created Created path := "/api/1.2preview/commands/execute" queryParams := make(map[string]any) @@ -516,12 +517,12 @@ func (a *commandExecutionPreviewImpl) Execute(ctx context.Context, request Comma return &created, err } -// unexported type that holds implementations of just GlobalInitScriptsPreview API methods -type globalInitScriptsPreviewImpl struct { +// unexported type that holds implementations of just GlobalInitScripts API methods +type globalInitScriptsImpl struct { client *client.DatabricksClient } -func (a *globalInitScriptsPreviewImpl) Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error) { +func (a *globalInitScriptsImpl) Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.0preview/global-init-scripts" queryParams := make(map[string]any) @@ -532,7 +533,7 @@ func (a *globalInitScriptsPreviewImpl) Create(ctx context.Context, request Globa return &createResponse, err } -func (a *globalInitScriptsPreviewImpl) Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error { +func (a *globalInitScriptsImpl) Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) queryParams := make(map[string]any) @@ -541,7 +542,7 @@ func (a *globalInitScriptsPreviewImpl) Delete(ctx context.Context, request Delet return err } -func (a *globalInitScriptsPreviewImpl) Get(ctx context.Context, request GetGlobalInitScriptRequest) 
(*GlobalInitScriptDetailsWithContent, error) { +func (a *globalInitScriptsImpl) Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error) { var globalInitScriptDetailsWithContent GlobalInitScriptDetailsWithContent path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) queryParams := make(map[string]any) @@ -557,7 +558,7 @@ func (a *globalInitScriptsPreviewImpl) Get(ctx context.Context, request GetGloba // properties for each script but **not** the script contents. To retrieve the // contents of a script, use the [get a global init // script](:method:globalinitscripts/get) operation. -func (a *globalInitScriptsPreviewImpl) List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] { +func (a *globalInitScriptsImpl) List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListGlobalInitScriptsResponse, error) { @@ -582,11 +583,11 @@ func (a *globalInitScriptsPreviewImpl) List(ctx context.Context) listing.Iterato // properties for each script but **not** the script contents. To retrieve the // contents of a script, use the [get a global init // script](:method:globalinitscripts/get) operation. -func (a *globalInitScriptsPreviewImpl) ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) { +func (a *globalInitScriptsImpl) ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) { iterator := a.List(ctx) return listing.ToSlice[GlobalInitScriptDetails](ctx, iterator) } -func (a *globalInitScriptsPreviewImpl) internalList(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { +func (a *globalInitScriptsImpl) internalList(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { var listGlobalInitScriptsResponse ListGlobalInitScriptsResponse path := "/api/2.0preview/global-init-scripts" @@ -596,7 +597,7 @@ func (a *globalInitScriptsPreviewImpl) internalList(ctx context.Context) (*ListG return &listGlobalInitScriptsResponse, err } -func (a *globalInitScriptsPreviewImpl) Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error { +func (a *globalInitScriptsImpl) Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/global-init-scripts/%v", request.ScriptId) queryParams := make(map[string]any) @@ -606,12 +607,12 @@ func (a *globalInitScriptsPreviewImpl) Update(ctx context.Context, request Globa return err } -// unexported type that holds implementations of just InstancePoolsPreview API methods -type instancePoolsPreviewImpl struct { +// unexported type that holds implementations of just InstancePools API methods +type instancePoolsImpl struct { client *client.DatabricksClient } -func (a *instancePoolsPreviewImpl) Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error) { +func (a *instancePoolsImpl) Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error) { var createInstancePoolResponse CreateInstancePoolResponse path := "/api/2.0preview/instance-pools/create" queryParams := make(map[string]any) @@ -622,7 +623,7 @@ func (a *instancePoolsPreviewImpl) Create(ctx context.Context, request CreateIns return &createInstancePoolResponse, err } -func (a *instancePoolsPreviewImpl) Delete(ctx context.Context, request DeleteInstancePool) error { +func (a *instancePoolsImpl) Delete(ctx context.Context, request DeleteInstancePool) error { 
var deleteInstancePoolResponse DeleteInstancePoolResponse path := "/api/2.0preview/instance-pools/delete" queryParams := make(map[string]any) @@ -633,7 +634,7 @@ func (a *instancePoolsPreviewImpl) Delete(ctx context.Context, request DeleteIns return err } -func (a *instancePoolsPreviewImpl) Edit(ctx context.Context, request EditInstancePool) error { +func (a *instancePoolsImpl) Edit(ctx context.Context, request EditInstancePool) error { var editInstancePoolResponse EditInstancePoolResponse path := "/api/2.0preview/instance-pools/edit" queryParams := make(map[string]any) @@ -644,7 +645,7 @@ func (a *instancePoolsPreviewImpl) Edit(ctx context.Context, request EditInstanc return err } -func (a *instancePoolsPreviewImpl) Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error) { +func (a *instancePoolsImpl) Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error) { var getInstancePool GetInstancePool path := "/api/2.0preview/instance-pools/get" queryParams := make(map[string]any) @@ -654,7 +655,7 @@ func (a *instancePoolsPreviewImpl) Get(ctx context.Context, request GetInstanceP return &getInstancePool, err } -func (a *instancePoolsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error) { +func (a *instancePoolsImpl) GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error) { var getInstancePoolPermissionLevelsResponse GetInstancePoolPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v/permissionLevels", request.InstancePoolId) queryParams := make(map[string]any) @@ -664,7 +665,7 @@ func (a *instancePoolsPreviewImpl) GetPermissionLevels(ctx context.Context, requ return &getInstancePoolPermissionLevelsResponse, err } -func (a *instancePoolsPreviewImpl) GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { +func (a *instancePoolsImpl) GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) queryParams := make(map[string]any) @@ -677,7 +678,7 @@ func (a *instancePoolsPreviewImpl) GetPermissions(ctx context.Context, request G // List instance pool info. // // Gets a list of instance pools with their statistics. -func (a *instancePoolsPreviewImpl) List(ctx context.Context) listing.Iterator[InstancePoolAndStats] { +func (a *instancePoolsImpl) List(ctx context.Context) listing.Iterator[InstancePoolAndStats] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListInstancePools, error) { @@ -699,11 +700,11 @@ func (a *instancePoolsPreviewImpl) List(ctx context.Context) listing.Iterator[In // List instance pool info. // // Gets a list of instance pools with their statistics. 
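Reviewer note: endpoints with a request := struct{}{} stub, like the instance-pools list above, surface as argument-free ListAll variants. A sketch, with field names assumed from the main SDK:

func listPools(ctx context.Context, pools compute.InstancePoolsInterface) error {
	// No request struct: the list endpoint is unparameterized.
	all, err := pools.ListAll(ctx)
	if err != nil {
		return err
	}
	for _, p := range all {
		fmt.Println(p.InstancePoolId, p.InstancePoolName)
	}
	return nil
}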
-func (a *instancePoolsPreviewImpl) ListAll(ctx context.Context) ([]InstancePoolAndStats, error) { +func (a *instancePoolsImpl) ListAll(ctx context.Context) ([]InstancePoolAndStats, error) { iterator := a.List(ctx) return listing.ToSlice[InstancePoolAndStats](ctx, iterator) } -func (a *instancePoolsPreviewImpl) internalList(ctx context.Context) (*ListInstancePools, error) { +func (a *instancePoolsImpl) internalList(ctx context.Context) (*ListInstancePools, error) { var listInstancePools ListInstancePools path := "/api/2.0preview/instance-pools/list" @@ -713,7 +714,7 @@ func (a *instancePoolsPreviewImpl) internalList(ctx context.Context) (*ListInsta return &listInstancePools, err } -func (a *instancePoolsPreviewImpl) SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { +func (a *instancePoolsImpl) SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) queryParams := make(map[string]any) @@ -724,7 +725,7 @@ func (a *instancePoolsPreviewImpl) SetPermissions(ctx context.Context, request I return &instancePoolPermissions, err } -func (a *instancePoolsPreviewImpl) UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { +func (a *instancePoolsImpl) UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0preview/permissions/instance-pools/%v", request.InstancePoolId) queryParams := make(map[string]any) @@ -735,12 +736,12 @@ func (a *instancePoolsPreviewImpl) UpdatePermissions(ctx context.Context, reques return &instancePoolPermissions, err } -// unexported type that holds implementations of just InstanceProfilesPreview API methods -type instanceProfilesPreviewImpl struct { +// unexported type that holds implementations of just InstanceProfiles API methods +type instanceProfilesImpl struct { client *client.DatabricksClient } -func (a *instanceProfilesPreviewImpl) Add(ctx context.Context, request AddInstanceProfile) error { +func (a *instanceProfilesImpl) Add(ctx context.Context, request AddInstanceProfile) error { var addResponse AddResponse path := "/api/2.0preview/instance-profiles/add" queryParams := make(map[string]any) @@ -751,7 +752,7 @@ func (a *instanceProfilesPreviewImpl) Add(ctx context.Context, request AddInstan return err } -func (a *instanceProfilesPreviewImpl) Edit(ctx context.Context, request InstanceProfile) error { +func (a *instanceProfilesImpl) Edit(ctx context.Context, request InstanceProfile) error { var editResponse EditResponse path := "/api/2.0preview/instance-profiles/edit" queryParams := make(map[string]any) @@ -767,7 +768,7 @@ func (a *instanceProfilesPreviewImpl) Edit(ctx context.Context, request Instance // List the instance profiles that the calling user can use to launch a cluster. // // This API is available to all users. 
-func (a *instanceProfilesPreviewImpl) List(ctx context.Context) listing.Iterator[InstanceProfile] { +func (a *instanceProfilesImpl) List(ctx context.Context) listing.Iterator[InstanceProfile] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListInstanceProfilesResponse, error) { @@ -791,11 +792,11 @@ func (a *instanceProfilesPreviewImpl) List(ctx context.Context) listing.Iterator // List the instance profiles that the calling user can use to launch a cluster. // // This API is available to all users. -func (a *instanceProfilesPreviewImpl) ListAll(ctx context.Context) ([]InstanceProfile, error) { +func (a *instanceProfilesImpl) ListAll(ctx context.Context) ([]InstanceProfile, error) { iterator := a.List(ctx) return listing.ToSlice[InstanceProfile](ctx, iterator) } -func (a *instanceProfilesPreviewImpl) internalList(ctx context.Context) (*ListInstanceProfilesResponse, error) { +func (a *instanceProfilesImpl) internalList(ctx context.Context) (*ListInstanceProfilesResponse, error) { var listInstanceProfilesResponse ListInstanceProfilesResponse path := "/api/2.0preview/instance-profiles/list" @@ -805,7 +806,7 @@ func (a *instanceProfilesPreviewImpl) internalList(ctx context.Context) (*ListIn return &listInstanceProfilesResponse, err } -func (a *instanceProfilesPreviewImpl) Remove(ctx context.Context, request RemoveInstanceProfile) error { +func (a *instanceProfilesImpl) Remove(ctx context.Context, request RemoveInstanceProfile) error { var removeResponse RemoveResponse path := "/api/2.0preview/instance-profiles/remove" queryParams := make(map[string]any) @@ -816,8 +817,8 @@ func (a *instanceProfilesPreviewImpl) Remove(ctx context.Context, request Remove return err } -// unexported type that holds implementations of just LibrariesPreview API methods -type librariesPreviewImpl struct { +// unexported type that holds implementations of just Libraries API methods +type librariesImpl struct { client *client.DatabricksClient } @@ -825,7 +826,7 @@ type librariesPreviewImpl struct { // // Get the status of all libraries on all clusters. A status is returned for all // libraries installed on this cluster via the API or the libraries UI. -func (a *librariesPreviewImpl) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses] { +func (a *librariesImpl) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListAllClusterLibraryStatusesResponse, error) { @@ -848,11 +849,11 @@ func (a *librariesPreviewImpl) AllClusterStatuses(ctx context.Context) listing.I // // Get the status of all libraries on all clusters. A status is returned for all // libraries installed on this cluster via the API or the libraries UI. 
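Reviewer note: the libraries write path in the hunks below keeps the same request-struct shape. A hypothetical install sketch, where Library and PythonPyPiLibrary are model types I assume carry over from the main SDK:

func installPyPI(ctx context.Context, libs compute.LibrariesInterface, clusterID, pkg string) error {
	// Install returns once the request is accepted; poll ClusterStatus
	// afterwards to observe per-library progress.
	return libs.Install(ctx, compute.InstallLibraries{
		ClusterId: clusterID,
		Libraries: []compute.Library{
			{Pypi: &compute.PythonPyPiLibrary{Package: pkg}},
		},
	})
}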
-func (a *librariesPreviewImpl) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error) { +func (a *librariesImpl) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error) { iterator := a.AllClusterStatuses(ctx) return listing.ToSlice[ClusterLibraryStatuses](ctx, iterator) } -func (a *librariesPreviewImpl) internalAllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { +func (a *librariesImpl) internalAllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { var listAllClusterLibraryStatusesResponse ListAllClusterLibraryStatusesResponse path := "/api/2.0preview/libraries/all-cluster-statuses" @@ -871,7 +872,7 @@ func (a *librariesPreviewImpl) internalAllClusterStatuses(ctx context.Context) ( // are returned first. 2. Libraries that were previously requested to be // installed on this cluster, but are now marked for removal, in no // particular order, are returned last. -func (a *librariesPreviewImpl) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { +func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { getNextPage := func(ctx context.Context, req ClusterStatus) (*ClusterLibraryStatuses, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -898,11 +899,11 @@ func (a *librariesPreviewImpl) ClusterStatus(ctx context.Context, request Cluste // are returned first. 2. Libraries that were previously requested to be // installed on this cluster, but are now marked for removal, in no // particular order, are returned last. -func (a *librariesPreviewImpl) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { +func (a *librariesImpl) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { iterator := a.ClusterStatus(ctx, request) return listing.ToSlice[LibraryFullStatus](ctx, iterator) } -func (a *librariesPreviewImpl) internalClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { +func (a *librariesImpl) internalClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { var clusterLibraryStatuses ClusterLibraryStatuses path := "/api/2.0preview/libraries/cluster-status" queryParams := make(map[string]any) @@ -912,7 +913,7 @@ func (a *librariesPreviewImpl) internalClusterStatus(ctx context.Context, reques return &clusterLibraryStatuses, err } -func (a *librariesPreviewImpl) Install(ctx context.Context, request InstallLibraries) error { +func (a *librariesImpl) Install(ctx context.Context, request InstallLibraries) error { var installLibrariesResponse InstallLibrariesResponse path := "/api/2.0preview/libraries/install" queryParams := make(map[string]any) @@ -923,7 +924,7 @@ func (a *librariesPreviewImpl) Install(ctx context.Context, request InstallLibra return err } -func (a *librariesPreviewImpl) Uninstall(ctx context.Context, request UninstallLibraries) error { +func (a *librariesImpl) Uninstall(ctx context.Context, request UninstallLibraries) error { var uninstallLibrariesResponse UninstallLibrariesResponse path := "/api/2.0preview/libraries/uninstall" queryParams := make(map[string]any) @@ -934,12 +935,12 @@ func (a *librariesPreviewImpl) Uninstall(ctx context.Context, request UninstallL return err } -// unexported type that holds implementations of just PolicyComplianceForClustersPreview API methods -type
policyComplianceForClustersPreviewImpl struct { +// unexported type that holds implementations of just PolicyComplianceForClusters API methods +type policyComplianceForClustersImpl struct { client *client.DatabricksClient } -func (a *policyComplianceForClustersPreviewImpl) EnforceCompliance(ctx context.Context, request EnforceClusterComplianceRequest) (*EnforceClusterComplianceResponse, error) { +func (a *policyComplianceForClustersImpl) EnforceCompliance(ctx context.Context, request EnforceClusterComplianceRequest) (*EnforceClusterComplianceResponse, error) { var enforceClusterComplianceResponse EnforceClusterComplianceResponse path := "/api/2.0preview/policies/clusters/enforce-compliance" queryParams := make(map[string]any) @@ -950,7 +951,7 @@ func (a *policyComplianceForClustersPreviewImpl) EnforceCompliance(ctx context.C return &enforceClusterComplianceResponse, err } -func (a *policyComplianceForClustersPreviewImpl) GetCompliance(ctx context.Context, request GetClusterComplianceRequest) (*GetClusterComplianceResponse, error) { +func (a *policyComplianceForClustersImpl) GetCompliance(ctx context.Context, request GetClusterComplianceRequest) (*GetClusterComplianceResponse, error) { var getClusterComplianceResponse GetClusterComplianceResponse path := "/api/2.0preview/policies/clusters/get-compliance" queryParams := make(map[string]any) @@ -965,7 +966,7 @@ func (a *policyComplianceForClustersPreviewImpl) GetCompliance(ctx context.Conte // Returns the policy compliance status of all clusters that use a given policy. // Clusters could be out of compliance if their policy was updated after the // cluster was last edited. -func (a *policyComplianceForClustersPreviewImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] { +func (a *policyComplianceForClustersImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] { getNextPage := func(ctx context.Context, req ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -994,11 +995,11 @@ func (a *policyComplianceForClustersPreviewImpl) ListCompliance(ctx context.Cont // Returns the policy compliance status of all clusters that use a given policy. // Clusters could be out of compliance if their policy was updated after the // cluster was last edited. 
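Reviewer note: a dry-run compliance check makes the enforce/get pair above concrete; the ValidateOnly flag and the HasChanges field are assumed from the main SDK's model:

func checkCompliance(ctx context.Context, pc compute.PolicyComplianceForClustersInterface, clusterID string) error {
	resp, err := pc.EnforceCompliance(ctx, compute.EnforceClusterComplianceRequest{
		ClusterId:    clusterID,
		ValidateOnly: true, // report what would change without editing the cluster
	})
	if err != nil {
		return err
	}
	fmt.Println("enforcement would change settings:", resp.HasChanges)
	return nil
}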
-func (a *policyComplianceForClustersPreviewImpl) ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) { +func (a *policyComplianceForClustersImpl) ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) { iterator := a.ListCompliance(ctx, request) return listing.ToSlice[ClusterCompliance](ctx, iterator) } -func (a *policyComplianceForClustersPreviewImpl) internalListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { +func (a *policyComplianceForClustersImpl) internalListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { var listClusterCompliancesResponse ListClusterCompliancesResponse path := "/api/2.0preview/policies/clusters/list-compliance" queryParams := make(map[string]any) @@ -1008,12 +1009,12 @@ func (a *policyComplianceForClustersPreviewImpl) internalListCompliance(ctx cont return &listClusterCompliancesResponse, err } -// unexported type that holds implementations of just PolicyFamiliesPreview API methods -type policyFamiliesPreviewImpl struct { +// unexported type that holds implementations of just PolicyFamilies API methods +type policyFamiliesImpl struct { client *client.DatabricksClient } -func (a *policyFamiliesPreviewImpl) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) { +func (a *policyFamiliesImpl) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) { var policyFamily PolicyFamily path := fmt.Sprintf("/api/2.0preview/policy-families/%v", request.PolicyFamilyId) queryParams := make(map[string]any) @@ -1027,7 +1028,7 @@ func (a *policyFamiliesPreviewImpl) Get(ctx context.Context, request GetPolicyFa // // Returns the list of policy definition types available to use at their latest // version. This API is paginated. -func (a *policyFamiliesPreviewImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily] { +func (a *policyFamiliesImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily] { getNextPage := func(ctx context.Context, req ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1055,11 +1056,11 @@ func (a *policyFamiliesPreviewImpl) List(ctx context.Context, request ListPolicy // // Returns the list of policy definition types available to use at their latest // version. This API is paginated. 
-func (a *policyFamiliesPreviewImpl) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { +func (a *policyFamiliesImpl) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { iterator := a.List(ctx, request) return listing.ToSlice[PolicyFamily](ctx, iterator) } -func (a *policyFamiliesPreviewImpl) internalList(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { +func (a *policyFamiliesImpl) internalList(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { var listPolicyFamiliesResponse ListPolicyFamiliesResponse path := "/api/2.0preview/policy-families" queryParams := make(map[string]any) diff --git a/dashboards/v2preview/api.go b/dashboards/v2preview/api.go index 04627c7d3..8df1e3dba 100755 --- a/dashboards/v2preview/api.go +++ b/dashboards/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Genie Preview, Lakeview Embedded Preview, Lakeview Preview, Query Execution Preview, etc. +// These APIs allow you to manage Genie, Lakeview, Lakeview Embedded, Query Execution, etc. package dashboardspreview import ( @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type GeniePreviewInterface interface { +type GenieInterface interface { // Create conversation message. // @@ -65,9 +65,9 @@ type GeniePreviewInterface interface { StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) } -func NewGeniePreview(client *client.DatabricksClient) *GeniePreviewAPI { - return &GeniePreviewAPI{ - geniePreviewImpl: geniePreviewImpl{ +func NewGenie(client *client.DatabricksClient) *GenieAPI { + return &GenieAPI{ + genieImpl: genieImpl{ client: client, }, } @@ -78,15 +78,15 @@ func NewGeniePreview(client *client.DatabricksClient) *GeniePreviewAPI { // natural language. Genie uses data registered to Unity Catalog and requires at // least CAN USE permission on a Pro or Serverless SQL warehouse. Also, // Databricks Assistant must be enabled. -type GeniePreviewAPI struct { - geniePreviewImpl +type GenieAPI struct { + genieImpl } // Get conversation message. // // Get message from conversation. -func (a *GeniePreviewAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) { - return a.geniePreviewImpl.GetMessage(ctx, GenieGetConversationMessageRequest{ +func (a *GenieAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) { + return a.genieImpl.GetMessage(ctx, GenieGetConversationMessageRequest{ SpaceId: spaceId, ConversationId: conversationId, MessageId: messageId, @@ -98,8 +98,8 @@ func (a *GeniePreviewAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx c // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is // `EXECUTING_QUERY`. 
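Reviewer note: the renamed Genie surface keeps its convenience wrappers (see the hunks below). A sketch of fetching a query result; the dashboards import path, the alias, and the wrapper being surfaced on the client are assumptions:

func genieQueryResult(ctx context.Context, spaceID, conversationID, messageID string) error {
	// Assumed alias: dashboards "github.com/databricks/databricks-sdk-go/dashboards/v2preview"
	genie, err := dashboards.NewGenieClient(nil)
	if err != nil {
		return err
	}
	// Per the doc comment, only meaningful while the message has a query
	// attachment in `EXECUTING_QUERY` status.
	res, err := genie.GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx, spaceID, conversationID, messageID)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", res)
	return nil
}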
-func (a *GeniePreviewAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) { - return a.geniePreviewImpl.GetMessageQueryResult(ctx, GenieGetMessageQueryResultRequest{ +func (a *GenieAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) { + return a.genieImpl.GetMessageQueryResult(ctx, GenieGetMessageQueryResultRequest{ SpaceId: spaceId, ConversationId: conversationId, MessageId: messageId, @@ -110,8 +110,8 @@ func (a *GeniePreviewAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMess // // Get the result of SQL query by attachment id. This is only available if a // message has a query attachment and the message status is `EXECUTING_QUERY`. -func (a *GeniePreviewAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { - return a.geniePreviewImpl.GetMessageQueryResultByAttachment(ctx, GenieGetQueryResultByAttachmentRequest{ +func (a *GenieAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { + return a.genieImpl.GetMessageQueryResultByAttachment(ctx, GenieGetQueryResultByAttachmentRequest{ SpaceId: spaceId, ConversationId: conversationId, MessageId: messageId, @@ -119,42 +119,7 @@ func (a *GeniePreviewAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversat }) } -type LakeviewEmbeddedPreviewInterface interface { - - // Read a published dashboard in an embedded ui. - // - // Get the current published dashboard within an embedded context. - GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error - - // Read a published dashboard in an embedded ui. - // - // Get the current published dashboard within an embedded context. - GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error -} - -func NewLakeviewEmbeddedPreview(client *client.DatabricksClient) *LakeviewEmbeddedPreviewAPI { - return &LakeviewEmbeddedPreviewAPI{ - lakeviewEmbeddedPreviewImpl: lakeviewEmbeddedPreviewImpl{ - client: client, - }, - } -} - -// Token-based Lakeview APIs for embedding dashboards in external applications. -type LakeviewEmbeddedPreviewAPI struct { - lakeviewEmbeddedPreviewImpl -} - -// Read a published dashboard in an embedded ui. -// -// Get the current published dashboard within an embedded context. -func (a *LakeviewEmbeddedPreviewAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error { - return a.lakeviewEmbeddedPreviewImpl.GetPublishedDashboardEmbedded(ctx, GetPublishedDashboardEmbeddedRequest{ - DashboardId: dashboardId, - }) -} - -type LakeviewPreviewInterface interface { +type LakeviewInterface interface { // Create dashboard.
// @@ -286,9 +251,9 @@ type LakeviewPreviewInterface interface { UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) } -func NewLakeviewPreview(client *client.DatabricksClient) *LakeviewPreviewAPI { - return &LakeviewPreviewAPI{ - lakeviewPreviewImpl: lakeviewPreviewImpl{ +func NewLakeview(client *client.DatabricksClient) *LakeviewAPI { + return &LakeviewAPI{ + lakeviewImpl: lakeviewImpl{ client: client, }, } @@ -297,21 +262,21 @@ func NewLakeviewPreview(client *client.DatabricksClient) *LakeviewPreviewAPI { // These APIs provide specific management operations for Lakeview dashboards. // Generic resource management can be done with Workspace API (import, export, // get-status, list, delete). -type LakeviewPreviewAPI struct { - lakeviewPreviewImpl +type LakeviewAPI struct { + lakeviewImpl } // Delete dashboard schedule. -func (a *LakeviewPreviewAPI) DeleteScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) error { - return a.lakeviewPreviewImpl.DeleteSchedule(ctx, DeleteScheduleRequest{ +func (a *LakeviewAPI) DeleteScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) error { + return a.lakeviewImpl.DeleteSchedule(ctx, DeleteScheduleRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, }) } // Delete schedule subscription. -func (a *LakeviewPreviewAPI) DeleteSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) error { - return a.lakeviewPreviewImpl.DeleteSubscription(ctx, DeleteSubscriptionRequest{ +func (a *LakeviewAPI) DeleteSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) error { + return a.lakeviewImpl.DeleteSubscription(ctx, DeleteSubscriptionRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, SubscriptionId: subscriptionId, @@ -321,8 +286,8 @@ func (a *LakeviewPreviewAPI) DeleteSubscriptionByDashboardIdAndScheduleIdAndSubs // Get dashboard. // // Get a draft dashboard. -func (a *LakeviewPreviewAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { - return a.lakeviewPreviewImpl.Get(ctx, GetDashboardRequest{ +func (a *LakeviewAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { + return a.lakeviewImpl.Get(ctx, GetDashboardRequest{ DashboardId: dashboardId, }) } @@ -330,23 +295,23 @@ func (a *LakeviewPreviewAPI) GetByDashboardId(ctx context.Context, dashboardId s // Get published dashboard. // // Get the current published dashboard. -func (a *LakeviewPreviewAPI) GetPublishedByDashboardId(ctx context.Context, dashboardId string) (*PublishedDashboard, error) { - return a.lakeviewPreviewImpl.GetPublished(ctx, GetPublishedDashboardRequest{ +func (a *LakeviewAPI) GetPublishedByDashboardId(ctx context.Context, dashboardId string) (*PublishedDashboard, error) { + return a.lakeviewImpl.GetPublished(ctx, GetPublishedDashboardRequest{ DashboardId: dashboardId, }) } // Get dashboard schedule. 
-func (a *LakeviewPreviewAPI) GetScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*Schedule, error) { - return a.lakeviewPreviewImpl.GetSchedule(ctx, GetScheduleRequest{ +func (a *LakeviewAPI) GetScheduleByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*Schedule, error) { + return a.lakeviewImpl.GetSchedule(ctx, GetScheduleRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, }) } // Get schedule subscription. -func (a *LakeviewPreviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) (*Subscription, error) { - return a.lakeviewPreviewImpl.GetSubscription(ctx, GetSubscriptionRequest{ +func (a *LakeviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId(ctx context.Context, dashboardId string, scheduleId string, subscriptionId string) (*Subscription, error) { + return a.lakeviewImpl.GetSubscription(ctx, GetSubscriptionRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, SubscriptionId: subscriptionId, @@ -354,15 +319,15 @@ func (a *LakeviewPreviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscri } // List dashboard schedules. -func (a *LakeviewPreviewAPI) ListSchedulesByDashboardId(ctx context.Context, dashboardId string) (*ListSchedulesResponse, error) { - return a.lakeviewPreviewImpl.internalListSchedules(ctx, ListSchedulesRequest{ +func (a *LakeviewAPI) ListSchedulesByDashboardId(ctx context.Context, dashboardId string) (*ListSchedulesResponse, error) { + return a.lakeviewImpl.internalListSchedules(ctx, ListSchedulesRequest{ DashboardId: dashboardId, }) } // List schedule subscriptions. -func (a *LakeviewPreviewAPI) ListSubscriptionsByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*ListSubscriptionsResponse, error) { - return a.lakeviewPreviewImpl.internalListSubscriptions(ctx, ListSubscriptionsRequest{ +func (a *LakeviewAPI) ListSubscriptionsByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*ListSubscriptionsResponse, error) { + return a.lakeviewImpl.internalListSubscriptions(ctx, ListSubscriptionsRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, }) @@ -371,8 +336,8 @@ func (a *LakeviewPreviewAPI) ListSubscriptionsByDashboardIdAndScheduleId(ctx con // Trash dashboard. // // Trash a dashboard. -func (a *LakeviewPreviewAPI) TrashByDashboardId(ctx context.Context, dashboardId string) error { - return a.lakeviewPreviewImpl.Trash(ctx, TrashDashboardRequest{ +func (a *LakeviewAPI) TrashByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewImpl.Trash(ctx, TrashDashboardRequest{ DashboardId: dashboardId, }) } @@ -380,13 +345,48 @@ func (a *LakeviewPreviewAPI) TrashByDashboardId(ctx context.Context, dashboardId // Unpublish dashboard. // // Unpublish the dashboard. -func (a *LakeviewPreviewAPI) UnpublishByDashboardId(ctx context.Context, dashboardId string) error { - return a.lakeviewPreviewImpl.Unpublish(ctx, UnpublishDashboardRequest{ +func (a *LakeviewAPI) UnpublishByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewImpl.Unpublish(ctx, UnpublishDashboardRequest{ + DashboardId: dashboardId, + }) +} + +type LakeviewEmbeddedInterface interface { + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. 
+ GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error +} + +func NewLakeviewEmbedded(client *client.DatabricksClient) *LakeviewEmbeddedAPI { + return &LakeviewEmbeddedAPI{ + lakeviewEmbeddedImpl: lakeviewEmbeddedImpl{ + client: client, + }, + } +} + +// Token-based Lakeview APIs for embedding dashboards in external applications. +type LakeviewEmbeddedAPI struct { + lakeviewEmbeddedImpl +} + +// Read a published dashboard in an embedded ui. +// +// Get the current published dashboard within an embedded context. +func (a *LakeviewEmbeddedAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewEmbeddedImpl.GetPublishedDashboardEmbedded(ctx, GetPublishedDashboardEmbeddedRequest{ DashboardId: dashboardId, }) } -type QueryExecutionPreviewInterface interface { +type QueryExecutionInterface interface { // Cancel the results for a query for a published, embedded dashboard. CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) @@ -398,15 +398,15 @@ type QueryExecutionPreviewInterface interface { PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) } -func NewQueryExecutionPreview(client *client.DatabricksClient) *QueryExecutionPreviewAPI { - return &QueryExecutionPreviewAPI{ - queryExecutionPreviewImpl: queryExecutionPreviewImpl{ +func NewQueryExecution(client *client.DatabricksClient) *QueryExecutionAPI { + return &QueryExecutionAPI{ + queryExecutionImpl: queryExecutionImpl{ client: client, }, } } // Query execution APIs for AI / BI Dashboards -type QueryExecutionPreviewAPI struct { - queryExecutionPreviewImpl +type QueryExecutionAPI struct { + queryExecutionImpl } diff --git a/dashboards/v2preview/client.go b/dashboards/v2preview/client.go index 7022f0c6a..59d3c660a 100755 --- a/dashboards/v2preview/client.go +++ b/dashboards/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type GeniePreviewClient struct { - GeniePreviewInterface +type GenieClient struct { + GenieInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewGeniePreviewClient(cfg *config.Config) (*GeniePreviewClient, error) { +func NewGenieClient(cfg *config.Config) (*GenieClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewGeniePreviewClient(cfg *config.Config) (*GeniePreviewClient, error) { return nil, err } - return &GeniePreviewClient{ - Config: cfg, - apiClient: apiClient, - GeniePreviewInterface: NewGeniePreview(databricksClient), + return &GenieClient{ + Config: cfg, + apiClient: apiClient, + GenieInterface: NewGenie(databricksClient), }, nil } -type LakeviewEmbeddedPreviewClient struct { - LakeviewEmbeddedPreviewInterface +type LakeviewClient struct { + LakeviewInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewLakeviewEmbeddedPreviewClient(cfg *config.Config) (*LakeviewEmbeddedPreviewClient, error) { +func NewLakeviewClient(cfg *config.Config) (*LakeviewClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewLakeviewEmbeddedPreviewClient(cfg *config.Config) (*LakeviewEmbeddedPrev
return nil, err } - return &LakeviewEmbeddedPreviewClient{ - Config: cfg, - apiClient: apiClient, - LakeviewEmbeddedPreviewInterface: NewLakeviewEmbeddedPreview(databricksClient), + return &LakeviewClient{ + Config: cfg, + apiClient: apiClient, + LakeviewInterface: NewLakeview(databricksClient), }, nil } -type LakeviewPreviewClient struct { - LakeviewPreviewInterface +type LakeviewEmbeddedClient struct { + LakeviewEmbeddedInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewLakeviewPreviewClient(cfg *config.Config) (*LakeviewPreviewClient, error) { +func NewLakeviewEmbeddedClient(cfg *config.Config) (*LakeviewEmbeddedClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewLakeviewPreviewClient(cfg *config.Config) (*LakeviewPreviewClient, error return nil, err } - return &LakeviewPreviewClient{ - Config: cfg, - apiClient: apiClient, - LakeviewPreviewInterface: NewLakeviewPreview(databricksClient), + return &LakeviewEmbeddedClient{ + Config: cfg, + apiClient: apiClient, + LakeviewEmbeddedInterface: NewLakeviewEmbedded(databricksClient), }, nil } -type QueryExecutionPreviewClient struct { - QueryExecutionPreviewInterface +type QueryExecutionClient struct { + QueryExecutionInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQueryExecutionPreviewClient(cfg *config.Config) (*QueryExecutionPreviewClient, error) { +func NewQueryExecutionClient(cfg *config.Config) (*QueryExecutionClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,9 +139,9 @@ func NewQueryExecutionPreviewClient(cfg *config.Config) (*QueryExecutionPreviewC return nil, err } - return &QueryExecutionPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueryExecutionPreviewInterface: NewQueryExecutionPreview(databricksClient), + return &QueryExecutionClient{ + Config: cfg, + apiClient: apiClient, + QueryExecutionInterface: NewQueryExecution(databricksClient), }, nil } diff --git a/dashboards/v2preview/impl.go b/dashboards/v2preview/impl.go index 077a5bcc3..ea1648e4f 100755 --- a/dashboards/v2preview/impl.go +++ b/dashboards/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just GeniePreview API methods -type geniePreviewImpl struct { +// unexported type that holds implementations of just Genie API methods +type genieImpl struct { client *client.DatabricksClient } -func (a *geniePreviewImpl) CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) { +func (a *genieImpl) CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages", request.SpaceId, request.ConversationId) queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *geniePreviewImpl) CreateMessage(ctx context.Context, request GenieCreat return &genieMessage, err } -func (a *geniePreviewImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { +func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/execute-query", request.SpaceId, 
request.ConversationId, request.MessageId) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *geniePreviewImpl) ExecuteMessageQuery(ctx context.Context, request Geni return &genieGetMessageQueryResultResponse, err } -func (a *geniePreviewImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { +func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) queryParams := make(map[string]any) @@ -48,7 +48,7 @@ func (a *geniePreviewImpl) GetMessage(ctx context.Context, request GenieGetConve return &genieMessage, err } -func (a *geniePreviewImpl) GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { +func (a *genieImpl) GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/query-result", request.SpaceId, request.ConversationId, request.MessageId) queryParams := make(map[string]any) @@ -58,7 +58,7 @@ func (a *geniePreviewImpl) GetMessageQueryResult(ctx context.Context, request Ge return &genieGetMessageQueryResultResponse, err } -func (a *geniePreviewImpl) GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) { +func (a *genieImpl) GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/conversations/%v/messages/%v/query-result/%v", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) queryParams := make(map[string]any) @@ -68,7 +68,7 @@ func (a *geniePreviewImpl) GetMessageQueryResultByAttachment(ctx context.Context return &genieGetMessageQueryResultResponse, err } -func (a *geniePreviewImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { +func (a *genieImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { var genieStartConversationResponse GenieStartConversationResponse path := fmt.Sprintf("/api/2.0preview/genie/spaces/%v/start-conversation", request.SpaceId) queryParams := make(map[string]any) @@ -79,27 +79,12 @@ func (a *geniePreviewImpl) StartConversation(ctx context.Context, request GenieS return &genieStartConversationResponse, err } -// unexported type that holds implementations of just LakeviewEmbeddedPreview API methods -type lakeviewEmbeddedPreviewImpl struct { +// unexported type that holds implementations of just Lakeview API methods +type lakeviewImpl struct { client *client.DatabricksClient } -func (a *lakeviewEmbeddedPreviewImpl) GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error { - var getPublishedDashboardEmbeddedResponse GetPublishedDashboardEmbeddedResponse - path := 
fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published/embedded", request.DashboardId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardEmbeddedResponse) - return err -} - -// unexported type that holds implementations of just LakeviewPreview API methods -type lakeviewPreviewImpl struct { - client *client.DatabricksClient -} - -func (a *lakeviewPreviewImpl) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error) { +func (a *lakeviewImpl) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0preview/lakeview/dashboards" queryParams := make(map[string]any) @@ -110,7 +95,7 @@ func (a *lakeviewPreviewImpl) Create(ctx context.Context, request CreateDashboar return &dashboard, err } -func (a *lakeviewPreviewImpl) CreateSchedule(ctx context.Context, request CreateScheduleRequest) (*Schedule, error) { +func (a *lakeviewImpl) CreateSchedule(ctx context.Context, request CreateScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules", request.DashboardId) queryParams := make(map[string]any) @@ -121,7 +106,7 @@ func (a *lakeviewPreviewImpl) CreateSchedule(ctx context.Context, request Create return &schedule, err } -func (a *lakeviewPreviewImpl) CreateSubscription(ctx context.Context, request CreateSubscriptionRequest) (*Subscription, error) { +func (a *lakeviewImpl) CreateSubscription(ctx context.Context, request CreateSubscriptionRequest) (*Subscription, error) { var subscription Subscription path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) @@ -132,7 +117,7 @@ func (a *lakeviewPreviewImpl) CreateSubscription(ctx context.Context, request Cr return &subscription, err } -func (a *lakeviewPreviewImpl) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { +func (a *lakeviewImpl) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { var deleteScheduleResponse DeleteScheduleResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) @@ -142,7 +127,7 @@ func (a *lakeviewPreviewImpl) DeleteSchedule(ctx context.Context, request Delete return err } -func (a *lakeviewPreviewImpl) DeleteSubscription(ctx context.Context, request DeleteSubscriptionRequest) error { +func (a *lakeviewImpl) DeleteSubscription(ctx context.Context, request DeleteSubscriptionRequest) error { var deleteSubscriptionResponse DeleteSubscriptionResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) queryParams := make(map[string]any) @@ -152,7 +137,7 @@ func (a *lakeviewPreviewImpl) DeleteSubscription(ctx context.Context, request De return err } -func (a *lakeviewPreviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { +func (a *lakeviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -162,7 +147,7 @@ func (a *lakeviewPreviewImpl) 
Get(ctx context.Context, request GetDashboardReque return &dashboard, err } -func (a *lakeviewPreviewImpl) GetPublished(ctx context.Context, request GetPublishedDashboardRequest) (*PublishedDashboard, error) { +func (a *lakeviewImpl) GetPublished(ctx context.Context, request GetPublishedDashboardRequest) (*PublishedDashboard, error) { var publishedDashboard PublishedDashboard path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) queryParams := make(map[string]any) @@ -172,7 +157,7 @@ func (a *lakeviewPreviewImpl) GetPublished(ctx context.Context, request GetPubli return &publishedDashboard, err } -func (a *lakeviewPreviewImpl) GetSchedule(ctx context.Context, request GetScheduleRequest) (*Schedule, error) { +func (a *lakeviewImpl) GetSchedule(ctx context.Context, request GetScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) @@ -182,7 +167,7 @@ func (a *lakeviewPreviewImpl) GetSchedule(ctx context.Context, request GetSchedu return &schedule, err } -func (a *lakeviewPreviewImpl) GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) { +func (a *lakeviewImpl) GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) { var subscription Subscription path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) queryParams := make(map[string]any) @@ -193,7 +178,7 @@ func (a *lakeviewPreviewImpl) GetSubscription(ctx context.Context, request GetSu } // List dashboards. -func (a *lakeviewPreviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { +func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListDashboardsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -218,11 +203,11 @@ func (a *lakeviewPreviewImpl) List(ctx context.Context, request ListDashboardsRe } // List dashboards. -func (a *lakeviewPreviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { +func (a *lakeviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { iterator := a.List(ctx, request) return listing.ToSlice[Dashboard](ctx, iterator) } -func (a *lakeviewPreviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { +func (a *lakeviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { var listDashboardsResponse ListDashboardsResponse path := "/api/2.0preview/lakeview/dashboards" queryParams := make(map[string]any) @@ -233,7 +218,7 @@ func (a *lakeviewPreviewImpl) internalList(ctx context.Context, request ListDash } // List dashboard schedules. 
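The List implementation above wraps the paginated endpoint in a listing.Iterator, and ListAll drains that iterator into a slice. A consumption sketch continuing the previous example, assuming the iterator follows the HasNext/Next contract of the SDK's listing package and that Dashboard exposes a DashboardId field:

    // Lazy iteration: pages are fetched as the iterator advances.
    it := lakeview.List(ctx, dashboards.ListDashboardsRequest{PageSize: 100})
    for it.HasNext(ctx) {
        d, err := it.Next(ctx)
        if err != nil {
            log.Fatal(err)
        }
        log.Println(d.DashboardId)
    }

    // Eager variant: every page is loaded into memory before returning.
    all, err := lakeview.ListAll(ctx, dashboards.ListDashboardsRequest{})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("total dashboards: %d", len(all))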
-func (a *lakeviewPreviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] { +func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] { getNextPage := func(ctx context.Context, req ListSchedulesRequest) (*ListSchedulesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -258,11 +243,11 @@ func (a *lakeviewPreviewImpl) ListSchedules(ctx context.Context, request ListSch } // List dashboard schedules. -func (a *lakeviewPreviewImpl) ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) { +func (a *lakeviewImpl) ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) { iterator := a.ListSchedules(ctx, request) return listing.ToSlice[Schedule](ctx, iterator) } -func (a *lakeviewPreviewImpl) internalListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { +func (a *lakeviewImpl) internalListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { var listSchedulesResponse ListSchedulesResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules", request.DashboardId) queryParams := make(map[string]any) @@ -273,7 +258,7 @@ func (a *lakeviewPreviewImpl) internalListSchedules(ctx context.Context, request } // List schedule subscriptions. -func (a *lakeviewPreviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] { +func (a *lakeviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] { getNextPage := func(ctx context.Context, req ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -298,11 +283,11 @@ func (a *lakeviewPreviewImpl) ListSubscriptions(ctx context.Context, request Lis } // List schedule subscriptions. 
-func (a *lakeviewPreviewImpl) ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) { +func (a *lakeviewImpl) ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) { iterator := a.ListSubscriptions(ctx, request) return listing.ToSlice[Subscription](ctx, iterator) } -func (a *lakeviewPreviewImpl) internalListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { +func (a *lakeviewImpl) internalListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { var listSubscriptionsResponse ListSubscriptionsResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) @@ -312,7 +297,7 @@ func (a *lakeviewPreviewImpl) internalListSubscriptions(ctx context.Context, req return &listSubscriptionsResponse, err } -func (a *lakeviewPreviewImpl) Migrate(ctx context.Context, request MigrateDashboardRequest) (*Dashboard, error) { +func (a *lakeviewImpl) Migrate(ctx context.Context, request MigrateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0preview/lakeview/dashboards/migrate" queryParams := make(map[string]any) @@ -323,7 +308,7 @@ func (a *lakeviewPreviewImpl) Migrate(ctx context.Context, request MigrateDashbo return &dashboard, err } -func (a *lakeviewPreviewImpl) Publish(ctx context.Context, request PublishRequest) (*PublishedDashboard, error) { +func (a *lakeviewImpl) Publish(ctx context.Context, request PublishRequest) (*PublishedDashboard, error) { var publishedDashboard PublishedDashboard path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) queryParams := make(map[string]any) @@ -334,7 +319,7 @@ func (a *lakeviewPreviewImpl) Publish(ctx context.Context, request PublishReques return &publishedDashboard, err } -func (a *lakeviewPreviewImpl) Trash(ctx context.Context, request TrashDashboardRequest) error { +func (a *lakeviewImpl) Trash(ctx context.Context, request TrashDashboardRequest) error { var trashDashboardResponse TrashDashboardResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -344,7 +329,7 @@ func (a *lakeviewPreviewImpl) Trash(ctx context.Context, request TrashDashboardR return err } -func (a *lakeviewPreviewImpl) Unpublish(ctx context.Context, request UnpublishDashboardRequest) error { +func (a *lakeviewImpl) Unpublish(ctx context.Context, request UnpublishDashboardRequest) error { var unpublishDashboardResponse UnpublishDashboardResponse path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published", request.DashboardId) queryParams := make(map[string]any) @@ -354,7 +339,7 @@ func (a *lakeviewPreviewImpl) Unpublish(ctx context.Context, request UnpublishDa return err } -func (a *lakeviewPreviewImpl) Update(ctx context.Context, request UpdateDashboardRequest) (*Dashboard, error) { +func (a *lakeviewImpl) Update(ctx context.Context, request UpdateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -365,7 +350,7 @@ func (a *lakeviewPreviewImpl) Update(ctx context.Context, request UpdateDashboar return &dashboard, err } -func (a *lakeviewPreviewImpl) UpdateSchedule(ctx context.Context, request 
UpdateScheduleRequest) (*Schedule, error) { +func (a *lakeviewImpl) UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) @@ -376,12 +361,27 @@ func (a *lakeviewPreviewImpl) UpdateSchedule(ctx context.Context, request Update return &schedule, err } -// unexported type that holds implementations of just QueryExecutionPreview API methods -type queryExecutionPreviewImpl struct { +// unexported type that holds implementations of just LakeviewEmbedded API methods +type lakeviewEmbeddedImpl struct { + client *client.DatabricksClient +} + +func (a *lakeviewEmbeddedImpl) GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error { + var getPublishedDashboardEmbeddedResponse GetPublishedDashboardEmbeddedResponse + path := fmt.Sprintf("/api/2.0preview/lakeview/dashboards/%v/published/embedded", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardEmbeddedResponse) + return err +} + +// unexported type that holds implementations of just QueryExecution API methods +type queryExecutionImpl struct { client *client.DatabricksClient } -func (a *queryExecutionPreviewImpl) CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) { +func (a *queryExecutionImpl) CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) { var cancelQueryExecutionResponse CancelQueryExecutionResponse path := "/api/2.0preview/lakeview-query/query/published" queryParams := make(map[string]any) @@ -391,7 +391,7 @@ func (a *queryExecutionPreviewImpl) CancelPublishedQueryExecution(ctx context.Co return &cancelQueryExecutionResponse, err } -func (a *queryExecutionPreviewImpl) ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error { +func (a *queryExecutionImpl) ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error { var executeQueryResponse ExecuteQueryResponse path := "/api/2.0preview/lakeview-query/query/published" queryParams := make(map[string]any) @@ -402,7 +402,7 @@ func (a *queryExecutionPreviewImpl) ExecutePublishedDashboardQuery(ctx context.C return err } -func (a *queryExecutionPreviewImpl) PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) { +func (a *queryExecutionImpl) PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) { var pollQueryStatusResponse PollQueryStatusResponse path := "/api/2.0preview/lakeview-query/query/published" queryParams := make(map[string]any) diff --git a/files/v2preview/api.go b/files/v2preview/api.go index 1410f5134..d196443e7 100755 --- a/files/v2preview/api.go +++ b/files/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Dbfs Preview, Files Preview, etc. +// These APIs allow you to manage Dbfs, Files, etc. 
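Each service in these preview packages gets the same standalone client shape: a struct embedding the service interface plus Config and apiClient fields, with a constructor that substitutes &config.Config{} when handed nil. A construction sketch for the two files clients defined below; the import path is an assumption derived from the filespreview package name:

    package main

    import (
        "log"

        files "github.com/databricks/databricks-sdk-go/files/v2preview"
    )

    func newFilesClients() (*files.DbfsClient, *files.FilesClient) {
        // nil is safe: the generated constructors fall back to an empty
        // config and resolve authentication from the environment.
        dbfs, err := files.NewDbfsClient(nil)
        if err != nil {
            log.Fatal(err)
        }
        fs, err := files.NewFilesClient(nil)
        if err != nil {
            log.Fatal(err)
        }
        return dbfs, fs
    }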
package filespreview import ( @@ -10,7 +10,8 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type DbfsPreviewInterface interface { +type DbfsInterface interface { + dbfsAPIUtilities // Append data block. // @@ -187,9 +188,9 @@ type DbfsPreviewInterface interface { Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error) } -func NewDbfsPreview(client *client.DatabricksClient) *DbfsPreviewAPI { - return &DbfsPreviewAPI{ - dbfsPreviewImpl: dbfsPreviewImpl{ +func NewDbfs(client *client.DatabricksClient) *DbfsAPI { + return &DbfsAPI{ + dbfsImpl: dbfsImpl{ client: client, }, } @@ -197,16 +198,16 @@ func NewDbfsPreview(client *client.DatabricksClient) *DbfsPreviewAPI { // DBFS API makes it simple to interact with various data sources without having // to include a user's credentials every time to read a file. -type DbfsPreviewAPI struct { - dbfsPreviewImpl +type DbfsAPI struct { + dbfsImpl } // Close the stream. // // Closes the stream specified by the input handle. If the handle does not // exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. -func (a *DbfsPreviewAPI) CloseByHandle(ctx context.Context, handle int64) error { - return a.dbfsPreviewImpl.Close(ctx, Close{ +func (a *DbfsAPI) CloseByHandle(ctx context.Context, handle int64) error { + return a.dbfsImpl.Close(ctx, Close{ Handle: handle, }) } @@ -215,8 +216,8 @@ func (a *DbfsPreviewAPI) CloseByHandle(ctx context.Context, handle int64) error // // Gets the file information for a file or directory. If the file or directory // does not exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. -func (a *DbfsPreviewAPI) GetStatusByPath(ctx context.Context, path string) (*FileInfo, error) { - return a.dbfsPreviewImpl.GetStatus(ctx, GetStatusRequest{ +func (a *DbfsAPI) GetStatusByPath(ctx context.Context, path string) (*FileInfo, error) { + return a.dbfsImpl.GetStatus(ctx, GetStatusRequest{ Path: path, }) } @@ -234,8 +235,8 @@ func (a *DbfsPreviewAPI) GetStatusByPath(ctx context.Context, path string) (*Fil // you perform such operations in the context of a cluster, using the [File // system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), // which provides the same functionality without timing out. -func (a *DbfsPreviewAPI) ListByPath(ctx context.Context, path string) (*ListStatusResponse, error) { - return a.dbfsPreviewImpl.internalList(ctx, ListDbfsRequest{ +func (a *DbfsAPI) ListByPath(ctx context.Context, path string) (*ListStatusResponse, error) { + return a.dbfsImpl.internalList(ctx, ListDbfsRequest{ Path: path, }) } @@ -247,13 +248,13 @@ func (a *DbfsPreviewAPI) ListByPath(ctx context.Context, path string) (*ListStat // this call throws an exception with `RESOURCE_ALREADY_EXISTS`. **Note**: If // this operation fails, it might have succeeded in creating some of the // necessary parent directories. -func (a *DbfsPreviewAPI) MkdirsByPath(ctx context.Context, path string) error { - return a.dbfsPreviewImpl.Mkdirs(ctx, MkDirs{ +func (a *DbfsAPI) MkdirsByPath(ctx context.Context, path string) error { + return a.dbfsImpl.Mkdirs(ctx, MkDirs{ Path: path, }) } -type FilesPreviewInterface interface { +type FilesInterface interface { // Create a directory.
// @@ -375,9 +376,9 @@ type FilesPreviewInterface interface { Upload(ctx context.Context, request UploadRequest) error } -func NewFilesPreview(client *client.DatabricksClient) *FilesPreviewAPI { - return &FilesPreviewAPI{ - filesPreviewImpl: filesPreviewImpl{ +func NewFiles(client *client.DatabricksClient) *FilesAPI { + return &FilesAPI{ + filesImpl: filesImpl{ client: client, }, } @@ -404,15 +405,15 @@ func NewFilesPreview(client *client.DatabricksClient) *FilesPreviewAPI { // `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. // // [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html -type FilesPreviewAPI struct { - filesPreviewImpl +type FilesAPI struct { + filesImpl } // Delete a file. // // Deletes a file. If the request is successful, there is no response body. -func (a *FilesPreviewAPI) DeleteByFilePath(ctx context.Context, filePath string) error { - return a.filesPreviewImpl.Delete(ctx, DeleteFileRequest{ +func (a *FilesAPI) DeleteByFilePath(ctx context.Context, filePath string) error { + return a.filesImpl.Delete(ctx, DeleteFileRequest{ FilePath: filePath, }) } @@ -424,8 +425,8 @@ func (a *FilesPreviewAPI) DeleteByFilePath(ctx context.Context, filePath string) // To delete a non-empty directory, first delete all of its contents. This can // be done by listing the directory contents and deleting each file and // subdirectory recursively. -func (a *FilesPreviewAPI) DeleteDirectoryByDirectoryPath(ctx context.Context, directoryPath string) error { - return a.filesPreviewImpl.DeleteDirectory(ctx, DeleteDirectoryRequest{ +func (a *FilesAPI) DeleteDirectoryByDirectoryPath(ctx context.Context, directoryPath string) error { + return a.filesImpl.DeleteDirectory(ctx, DeleteDirectoryRequest{ DirectoryPath: directoryPath, }) } @@ -435,8 +436,8 @@ func (a *FilesPreviewAPI) DeleteDirectoryByDirectoryPath(ctx context.Context, di // Downloads a file. The file contents are the response body. This is a standard // HTTP file download, not a JSON RPC. It supports the Range and // If-Unmodified-Since HTTP headers. -func (a *FilesPreviewAPI) DownloadByFilePath(ctx context.Context, filePath string) (*DownloadResponse, error) { - return a.filesPreviewImpl.Download(ctx, DownloadRequest{ +func (a *FilesAPI) DownloadByFilePath(ctx context.Context, filePath string) (*DownloadResponse, error) { + return a.filesImpl.Download(ctx, DownloadRequest{ FilePath: filePath, }) } @@ -452,8 +453,8 @@ func (a *FilesPreviewAPI) DownloadByFilePath(ctx context.Context, filePath strin // If you wish to ensure the directory exists, you can instead use `PUT`, which // will create the directory if it does not exist, and is idempotent (it will // succeed if the directory already exists). -func (a *FilesPreviewAPI) GetDirectoryMetadataByDirectoryPath(ctx context.Context, directoryPath string) error { - return a.filesPreviewImpl.GetDirectoryMetadata(ctx, GetDirectoryMetadataRequest{ +func (a *FilesAPI) GetDirectoryMetadataByDirectoryPath(ctx context.Context, directoryPath string) error { + return a.filesImpl.GetDirectoryMetadata(ctx, GetDirectoryMetadataRequest{ DirectoryPath: directoryPath, }) } @@ -462,8 +463,8 @@ func (a *FilesPreviewAPI) GetDirectoryMetadataByDirectoryPath(ctx context.Contex // // Get the metadata of a file. The response HTTP headers contain the metadata. // There is no response body. 
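The FilesAPI wrappers above reduce each call to a single path argument, and upload/download move raw bytes rather than JSON. A round-trip sketch continuing the clients from the previous example; treating UploadRequest.Contents and DownloadResponse.Contents as io.ReadCloser is an assumption carried over from the non-preview Files API:

    const volumePath = "/Volumes/main/default/my-volume/hello.txt"

    // Upload: the request body is the raw file contents, not JSON.
    err = fs.Upload(ctx, files.UploadRequest{
        FilePath: volumePath,
        Contents: io.NopCloser(strings.NewReader("hello, preview")),
    })
    if err != nil {
        log.Fatal(err)
    }

    // Download: the response body is the file contents; close it when done.
    resp, err := fs.DownloadByFilePath(ctx, volumePath)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Contents.Close()
    data, err := io.ReadAll(resp.Contents)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("read %d bytes", len(data))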
-func (a *FilesPreviewAPI) GetMetadataByFilePath(ctx context.Context, filePath string) (*GetMetadataResponse, error) { - return a.filesPreviewImpl.GetMetadata(ctx, GetMetadataRequest{ +func (a *FilesAPI) GetMetadataByFilePath(ctx context.Context, filePath string) (*GetMetadataResponse, error) { + return a.filesImpl.GetMetadata(ctx, GetMetadataRequest{ FilePath: filePath, }) } @@ -472,8 +473,8 @@ func (a *FilesPreviewAPI) GetMetadataByFilePath(ctx context.Context, filePath st // // Returns the contents of a directory. If there is no directory at the // specified path, the API returns an HTTP 404 error. -func (a *FilesPreviewAPI) ListDirectoryContentsByDirectoryPath(ctx context.Context, directoryPath string) (*ListDirectoryResponse, error) { - return a.filesPreviewImpl.internalListDirectoryContents(ctx, ListDirectoryContentsRequest{ +func (a *FilesAPI) ListDirectoryContentsByDirectoryPath(ctx context.Context, directoryPath string) (*ListDirectoryResponse, error) { + return a.filesImpl.internalListDirectoryContents(ctx, ListDirectoryContentsRequest{ DirectoryPath: directoryPath, }) } diff --git a/files/v2preview/client.go b/files/v2preview/client.go index 61044cd64..bfc137741 100755 --- a/files/v2preview/client.go +++ b/files/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type DbfsPreviewClient struct { - DbfsPreviewInterface +type DbfsClient struct { + DbfsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDbfsPreviewClient(cfg *config.Config) (*DbfsPreviewClient, error) { +func NewDbfsClient(cfg *config.Config) (*DbfsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewDbfsPreviewClient(cfg *config.Config) (*DbfsPreviewClient, error) { return nil, err } - return &DbfsPreviewClient{ - Config: cfg, - apiClient: apiClient, - DbfsPreviewInterface: NewDbfsPreview(databricksClient), + return &DbfsClient{ + Config: cfg, + apiClient: apiClient, + DbfsInterface: NewDbfs(databricksClient), }, nil } -type FilesPreviewClient struct { - FilesPreviewInterface +type FilesClient struct { + FilesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewFilesPreviewClient(cfg *config.Config) (*FilesPreviewClient, error) { +func NewFilesClient(cfg *config.Config) (*FilesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,9 +71,9 @@ func NewFilesPreviewClient(cfg *config.Config) (*FilesPreviewClient, error) { return nil, err } - return &FilesPreviewClient{ - Config: cfg, - apiClient: apiClient, - FilesPreviewInterface: NewFilesPreview(databricksClient), + return &FilesClient{ + Config: cfg, + apiClient: apiClient, + FilesInterface: NewFiles(databricksClient), }, nil } diff --git a/files/v2preview/impl.go b/files/v2preview/impl.go index 39613fcbe..c0cd4e90c 100755 --- a/files/v2preview/impl.go +++ b/files/v2preview/impl.go @@ -14,12 +14,12 @@ import ( "golang.org/x/exp/slices" ) -// unexported type that holds implementations of just DbfsPreview API methods -type dbfsPreviewImpl struct { +// unexported type that holds implementations of just Dbfs API methods +type dbfsImpl struct { client *client.DatabricksClient } -func (a *dbfsPreviewImpl) AddBlock(ctx context.Context, request AddBlock) error { +func (a *dbfsImpl) AddBlock(ctx context.Context,
request AddBlock) error return err } -func (a *dbfsPreviewImpl) Close(ctx context.Context, request Close) error { +func (a *dbfsImpl) Close(ctx context.Context, request Close) error { var closeResponse CloseResponse path := "/api/2.0preview/dbfs/close" queryParams := make(map[string]any) @@ -41,7 +41,7 @@ func (a *dbfsPreviewImpl) Close(ctx context.Context, request Close) error { return err } -func (a *dbfsPreviewImpl) Create(ctx context.Context, request Create) (*CreateResponse, error) { +func (a *dbfsImpl) Create(ctx context.Context, request Create) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.0preview/dbfs/create" queryParams := make(map[string]any) @@ -52,7 +52,7 @@ func (a *dbfsPreviewImpl) Create(ctx context.Context, request Create) (*CreateRe return &createResponse, err } -func (a *dbfsPreviewImpl) Delete(ctx context.Context, request Delete) error { +func (a *dbfsImpl) Delete(ctx context.Context, request Delete) error { var deleteResponse DeleteResponse path := "/api/2.0preview/dbfs/delete" queryParams := make(map[string]any) @@ -63,7 +63,7 @@ func (a *dbfsPreviewImpl) Delete(ctx context.Context, request Delete) error { return err } -func (a *dbfsPreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error) { +func (a *dbfsImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error) { var fileInfo FileInfo path := "/api/2.0preview/dbfs/get-status" queryParams := make(map[string]any) @@ -86,7 +86,7 @@ func (a *dbfsPreviewImpl) GetStatus(ctx context.Context, request GetStatusReques // you perform such operations in the context of a cluster, using the [File // system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), // which provides the same functionality without timing out. -func (a *dbfsPreviewImpl) List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] { +func (a *dbfsImpl) List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] { getNextPage := func(ctx context.Context, req ListDbfsRequest) (*ListStatusResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -117,11 +117,11 @@ func (a *dbfsPreviewImpl) List(ctx context.Context, request ListDbfsRequest) lis // you perform such operations in the context of a cluster, using the [File // system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), // which provides the same functionality without timing out. 
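Create, AddBlock, and Close above make up DBFS's handle-based streaming upload: open a handle, append base64-encoded blocks, then close it. A protocol sketch continuing the dbfs client from the earlier example; the Create/AddBlock field names (Path, Overwrite, Handle, Data) are assumptions carried over from the public DBFS API:

    payload := []byte("example bytes")

    // 1. Open a streaming handle for the target path.
    created, err := dbfs.Create(ctx, files.Create{Path: "/tmp/example.bin", Overwrite: true})
    if err != nil {
        log.Fatal(err)
    }

    // 2. Append one or more blocks; each block is a base64 string.
    err = dbfs.AddBlock(ctx, files.AddBlock{
        Handle: created.Handle,
        Data:   base64.StdEncoding.EncodeToString(payload),
    })
    if err != nil {
        log.Fatal(err)
    }

    // 3. Close the handle; the generated wrapper takes the bare int64.
    if err := dbfs.CloseByHandle(ctx, created.Handle); err != nil {
        log.Fatal(err)
    }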
-func (a *dbfsPreviewImpl) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error) { +func (a *dbfsImpl) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[FileInfo](ctx, iterator) } -func (a *dbfsPreviewImpl) internalList(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { +func (a *dbfsImpl) internalList(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { var listStatusResponse ListStatusResponse path := "/api/2.0preview/dbfs/list" queryParams := make(map[string]any) @@ -131,7 +131,7 @@ func (a *dbfsPreviewImpl) internalList(ctx context.Context, request ListDbfsRequ return &listStatusResponse, err } -func (a *dbfsPreviewImpl) Mkdirs(ctx context.Context, request MkDirs) error { +func (a *dbfsImpl) Mkdirs(ctx context.Context, request MkDirs) error { var mkDirsResponse MkDirsResponse path := "/api/2.0preview/dbfs/mkdirs" queryParams := make(map[string]any) @@ -142,7 +142,7 @@ func (a *dbfsPreviewImpl) Mkdirs(ctx context.Context, request MkDirs) error { return err } -func (a *dbfsPreviewImpl) Move(ctx context.Context, request Move) error { +func (a *dbfsImpl) Move(ctx context.Context, request Move) error { var moveResponse MoveResponse path := "/api/2.0preview/dbfs/move" queryParams := make(map[string]any) @@ -153,7 +153,7 @@ func (a *dbfsPreviewImpl) Move(ctx context.Context, request Move) error { return err } -func (a *dbfsPreviewImpl) Put(ctx context.Context, request Put) error { +func (a *dbfsImpl) Put(ctx context.Context, request Put) error { var putResponse PutResponse path := "/api/2.0preview/dbfs/put" queryParams := make(map[string]any) @@ -164,7 +164,7 @@ func (a *dbfsPreviewImpl) Put(ctx context.Context, request Put) error { return err } -func (a *dbfsPreviewImpl) Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error) { +func (a *dbfsImpl) Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error) { var readResponse ReadResponse path := "/api/2.0preview/dbfs/read" queryParams := make(map[string]any) @@ -174,12 +174,12 @@ func (a *dbfsPreviewImpl) Read(ctx context.Context, request ReadDbfsRequest) (*R return &readResponse, err } -// unexported type that holds implementations of just FilesPreview API methods -type filesPreviewImpl struct { +// unexported type that holds implementations of just Files API methods +type filesImpl struct { client *client.DatabricksClient } -func (a *filesPreviewImpl) CreateDirectory(ctx context.Context, request CreateDirectoryRequest) error { +func (a *filesImpl) CreateDirectory(ctx context.Context, request CreateDirectoryRequest) error { var createDirectoryResponse CreateDirectoryResponse path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) queryParams := make(map[string]any) @@ -188,7 +188,7 @@ func (a *filesPreviewImpl) CreateDirectory(ctx context.Context, request CreateDi return err } -func (a *filesPreviewImpl) Delete(ctx context.Context, request DeleteFileRequest) error { +func (a *filesImpl) Delete(ctx context.Context, request DeleteFileRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) queryParams := make(map[string]any) @@ -197,7 +197,7 @@ func (a *filesPreviewImpl) Delete(ctx context.Context, request DeleteFileRequest return err } -func (a *filesPreviewImpl) DeleteDirectory(ctx 
context.Context, request DeleteDirectoryRequest) error { +func (a *filesImpl) DeleteDirectory(ctx context.Context, request DeleteDirectoryRequest) error { var deleteDirectoryResponse DeleteDirectoryResponse path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) queryParams := make(map[string]any) @@ -206,7 +206,7 @@ func (a *filesPreviewImpl) DeleteDirectory(ctx context.Context, request DeleteDi return err } -func (a *filesPreviewImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { +func (a *filesImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { var downloadResponse DownloadResponse path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) queryParams := make(map[string]any) @@ -216,7 +216,7 @@ func (a *filesPreviewImpl) Download(ctx context.Context, request DownloadRequest return &downloadResponse, err } -func (a *filesPreviewImpl) GetDirectoryMetadata(ctx context.Context, request GetDirectoryMetadataRequest) error { +func (a *filesImpl) GetDirectoryMetadata(ctx context.Context, request GetDirectoryMetadataRequest) error { var getDirectoryMetadataResponse GetDirectoryMetadataResponse path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) queryParams := make(map[string]any) @@ -225,7 +225,7 @@ func (a *filesPreviewImpl) GetDirectoryMetadata(ctx context.Context, request Get return err } -func (a *filesPreviewImpl) GetMetadata(ctx context.Context, request GetMetadataRequest) (*GetMetadataResponse, error) { +func (a *filesImpl) GetMetadata(ctx context.Context, request GetMetadataRequest) (*GetMetadataResponse, error) { var getMetadataResponse GetMetadataResponse path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) queryParams := make(map[string]any) @@ -238,7 +238,7 @@ func (a *filesPreviewImpl) GetMetadata(ctx context.Context, request GetMetadataR // // Returns the contents of a directory. If there is no directory at the // specified path, the API returns an HTTP 404 error. -func (a *filesPreviewImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry] { +func (a *filesImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry] { getNextPage := func(ctx context.Context, req ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -266,12 +266,12 @@ func (a *filesPreviewImpl) ListDirectoryContents(ctx context.Context, request Li // // Returns the contents of a directory. If there is no directory at the // specified path, the API returns an HTTP 404 error.
-func (a *filesPreviewImpl) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) { +func (a *filesImpl) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) { iterator := a.ListDirectoryContents(ctx, request) return listing.ToSliceN[DirectoryEntry, int64](ctx, iterator, request.PageSize) } -func (a *filesPreviewImpl) internalListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { +func (a *filesImpl) internalListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { var listDirectoryResponse ListDirectoryResponse path := fmt.Sprintf("/api/2.0preview/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) queryParams := make(map[string]any) @@ -281,7 +281,7 @@ func (a *filesPreviewImpl) internalListDirectoryContents(ctx context.Context, re return &listDirectoryResponse, err } -func (a *filesPreviewImpl) Upload(ctx context.Context, request UploadRequest) error { +func (a *filesImpl) Upload(ctx context.Context, request UploadRequest) error { var uploadResponse UploadResponse path := fmt.Sprintf("/api/2.0preview/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) queryParams := make(map[string]any) diff --git a/iam/v2preview/api.go b/iam/v2preview/api.go index 936bfefa1..b7d13ad39 100755 --- a/iam/v2preview/api.go +++ b/iam/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Access Control Preview, Account Access Control Preview, Account Access Control Proxy Preview, Account Groups Preview, Account Service Principals Preview, Account Users Preview, Current User Preview, Groups Preview, Permission Migration Preview, Permissions Preview, Service Principals Preview, Users Preview, Workspace Assignment Preview, etc. +// These APIs allow you to manage Access Control, Account Access Control, Account Access Control Proxy, Account Groups, Account Service Principals, Account Users, Current User, Groups, Permission Migration, Permissions, Service Principals, Users, Workspace Assignment, etc. package iampreview import ( @@ -12,26 +12,26 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type AccessControlPreviewInterface interface { +type AccessControlInterface interface { // Check access policy to a resource. CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) } -func NewAccessControlPreview(client *client.DatabricksClient) *AccessControlPreviewAPI { - return &AccessControlPreviewAPI{ - accessControlPreviewImpl: accessControlPreviewImpl{ +func NewAccessControl(client *client.DatabricksClient) *AccessControlAPI { + return &AccessControlAPI{ + accessControlImpl: accessControlImpl{ client: client, }, } } // Rule based Access Control for Databricks Resources. -type AccessControlPreviewAPI struct { - accessControlPreviewImpl +type AccessControlAPI struct { + accessControlImpl } -type AccountAccessControlPreviewInterface interface { +type AccountAccessControlInterface interface { // Get assignable roles for a resource. 
// @@ -55,9 +55,9 @@ type AccountAccessControlPreviewInterface interface { UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) } -func NewAccountAccessControlPreview(client *client.DatabricksClient) *AccountAccessControlPreviewAPI { - return &AccountAccessControlPreviewAPI{ - accountAccessControlPreviewImpl: accountAccessControlPreviewImpl{ +func NewAccountAccessControl(client *client.DatabricksClient) *AccountAccessControlAPI { + return &AccountAccessControlAPI{ + accountAccessControlImpl: accountAccessControlImpl{ client: client, }, } @@ -66,11 +66,11 @@ func NewAccountAccessControlPreview(client *client.DatabricksClient) *AccountAcc // These APIs manage access rules on resources in an account. Currently, only // grant rules are supported. A grant rule specifies a role assigned to a set of // principals. A list of rules attached to a resource is called a rule set. -type AccountAccessControlPreviewAPI struct { - accountAccessControlPreviewImpl +type AccountAccessControlAPI struct { + accountAccessControlImpl } -type AccountAccessControlProxyPreviewInterface interface { +type AccountAccessControlProxyInterface interface { // Get assignable roles for a resource. // @@ -94,9 +94,9 @@ type AccountAccessControlProxyPreviewInterface interface { UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) } -func NewAccountAccessControlProxyPreview(client *client.DatabricksClient) *AccountAccessControlProxyPreviewAPI { - return &AccountAccessControlProxyPreviewAPI{ - accountAccessControlProxyPreviewImpl: accountAccessControlProxyPreviewImpl{ +func NewAccountAccessControlProxy(client *client.DatabricksClient) *AccountAccessControlProxyAPI { + return &AccountAccessControlProxyAPI{ + accountAccessControlProxyImpl: accountAccessControlProxyImpl{ client: client, }, } @@ -106,11 +106,11 @@ func NewAccountAccessControlProxyPreview(client *client.DatabricksClient) *Accou // grant rules are supported. A grant rule specifies a role assigned to a set of // principals. A list of rules attached to a resource is called a rule set. A // workspace must belong to an account for these APIs to work. -type AccountAccessControlProxyPreviewAPI struct { - accountAccessControlProxyPreviewImpl +type AccountAccessControlProxyAPI struct { + accountAccessControlProxyImpl } -type AccountGroupsPreviewInterface interface { +type AccountGroupsInterface interface { // Create a new group. // @@ -152,7 +152,7 @@ type AccountGroupsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) - // GroupDisplayNameToIdMap calls [AccountGroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. + // GroupDisplayNameToIdMap calls [AccountGroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. // @@ -161,7 +161,7 @@ type AccountGroupsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. GroupDisplayNameToIdMap(ctx context.Context, request ListAccountGroupsRequest) (map[string]string, error) - // GetByDisplayName calls [AccountGroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. + // GetByDisplayName calls [AccountGroupsAPI.GroupDisplayNameToIdMap] and returns a single [Group]. 
// // Returns an error if there's more than one [Group] with the same .DisplayName. // @@ -181,9 +181,9 @@ type AccountGroupsPreviewInterface interface { Update(ctx context.Context, request Group) error } -func NewAccountGroupsPreview(client *client.DatabricksClient) *AccountGroupsPreviewAPI { - return &AccountGroupsPreviewAPI{ - accountGroupsPreviewImpl: accountGroupsPreviewImpl{ +func NewAccountGroups(client *client.DatabricksClient) *AccountGroupsAPI { + return &AccountGroupsAPI{ + accountGroupsImpl: accountGroupsImpl{ client: client, }, } @@ -196,15 +196,15 @@ func NewAccountGroupsPreview(client *client.DatabricksClient) *AccountGroupsPrev // policies in Unity Catalog to groups, instead of to users individually. All // Databricks account identities can be assigned as members of groups, and // members inherit permissions that are assigned to their group. -type AccountGroupsPreviewAPI struct { - accountGroupsPreviewImpl +type AccountGroupsAPI struct { + accountGroupsImpl } // Delete a group. // // Deletes a group from the Databricks account. -func (a *AccountGroupsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.accountGroupsPreviewImpl.Delete(ctx, DeleteAccountGroupRequest{ +func (a *AccountGroupsAPI) DeleteById(ctx context.Context, id string) error { + return a.accountGroupsImpl.Delete(ctx, DeleteAccountGroupRequest{ Id: id, }) } @@ -212,20 +212,20 @@ func (a *AccountGroupsPreviewAPI) DeleteById(ctx context.Context, id string) err // Get group details. // // Gets the information for a specific group in the Databricks account. -func (a *AccountGroupsPreviewAPI) GetById(ctx context.Context, id string) (*Group, error) { - return a.accountGroupsPreviewImpl.Get(ctx, GetAccountGroupRequest{ +func (a *AccountGroupsAPI) GetById(ctx context.Context, id string) (*Group, error) { + return a.accountGroupsImpl.Get(ctx, GetAccountGroupRequest{ Id: id, }) } -// GroupDisplayNameToIdMap calls [AccountGroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. +// GroupDisplayNameToIdMap calls [AccountGroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. // // Note: All [Group] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountGroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListAccountGroupsRequest) (map[string]string, error) { +func (a *AccountGroupsAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListAccountGroupsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -243,14 +243,14 @@ func (a *AccountGroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, r return mapping, nil } -// GetByDisplayName calls [AccountGroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. +// GetByDisplayName calls [AccountGroupsAPI.GroupDisplayNameToIdMap] and returns a single [Group]. // // Returns an error if there's more than one [Group] with the same .DisplayName. // // Note: All [Group] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
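As the doc comments state, GetByDisplayName is layered on ListAll, so every group is paged into memory before the single name match, and zero or duplicate matches return an error. A lookup sketch; the iampreview import path and the NewAccountGroupsClient constructor are assumptions, since iam's client.go is outside this excerpt but should follow the client.go pattern shown above:

    import (
        iam "github.com/databricks/databricks-sdk-go/iam/v2preview"
    )

    groups, err := iam.NewAccountGroupsClient(nil) // assumed constructor
    if err != nil {
        log.Fatal(err)
    }
    // Errors if no group, or more than one group, has this display name.
    g, err := groups.GetByDisplayName(ctx, "data-engineers")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("group id: %s", g.Id)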
-func (a *AccountGroupsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) { +func (a *AccountGroupsAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListAccountGroupsRequest{}) if err != nil { @@ -271,7 +271,7 @@ func (a *AccountGroupsPreviewAPI) GetByDisplayName(ctx context.Context, name str return &alternatives[0], nil } -type AccountServicePrincipalsPreviewInterface interface { +type AccountServicePrincipalsInterface interface { // Create a service principal. // @@ -314,7 +314,7 @@ type AccountServicePrincipalsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) - // ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. + // ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // @@ -323,7 +323,7 @@ type AccountServicePrincipalsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListAccountServicePrincipalsRequest) (map[string]string, error) - // GetByDisplayName calls [AccountServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. + // GetByDisplayName calls [AccountServicePrincipalsAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // @@ -346,9 +346,9 @@ type AccountServicePrincipalsPreviewInterface interface { Update(ctx context.Context, request ServicePrincipal) error } -func NewAccountServicePrincipalsPreview(client *client.DatabricksClient) *AccountServicePrincipalsPreviewAPI { - return &AccountServicePrincipalsPreviewAPI{ - accountServicePrincipalsPreviewImpl: accountServicePrincipalsPreviewImpl{ +func NewAccountServicePrincipals(client *client.DatabricksClient) *AccountServicePrincipalsAPI { + return &AccountServicePrincipalsAPI{ + accountServicePrincipalsImpl: accountServicePrincipalsImpl{ client: client, }, } @@ -360,15 +360,15 @@ func NewAccountServicePrincipalsPreview(client *client.DatabricksClient) *Accoun // on production data run with service principals, interactive users do not need // any write, delete, or modify privileges in production. This eliminates the // risk of a user overwriting production data by accident. -type AccountServicePrincipalsPreviewAPI struct { - accountServicePrincipalsPreviewImpl +type AccountServicePrincipalsAPI struct { + accountServicePrincipalsImpl } // Delete a service principal. // // Delete a single service principal in the Databricks account. 
-func (a *AccountServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.accountServicePrincipalsPreviewImpl.Delete(ctx, DeleteAccountServicePrincipalRequest{ +func (a *AccountServicePrincipalsAPI) DeleteById(ctx context.Context, id string) error { + return a.accountServicePrincipalsImpl.Delete(ctx, DeleteAccountServicePrincipalRequest{ Id: id, }) } @@ -377,20 +377,20 @@ func (a *AccountServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id // // Gets the details for a single service principal defined in the Databricks // account. -func (a *AccountServicePrincipalsPreviewAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) { - return a.accountServicePrincipalsPreviewImpl.Get(ctx, GetAccountServicePrincipalRequest{ +func (a *AccountServicePrincipalsAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) { + return a.accountServicePrincipalsImpl.Get(ctx, GetAccountServicePrincipalRequest{ Id: id, }) } -// ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. +// ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // // Note: All [ServicePrincipal] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListAccountServicePrincipalsRequest) (map[string]string, error) { +func (a *AccountServicePrincipalsAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListAccountServicePrincipalsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -408,14 +408,14 @@ func (a *AccountServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap( return mapping, nil } -// GetByDisplayName calls [AccountServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. +// GetByDisplayName calls [AccountServicePrincipalsAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // // Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) { +func (a *AccountServicePrincipalsAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListAccountServicePrincipalsRequest{}) if err != nil { @@ -436,7 +436,7 @@ func (a *AccountServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Contex return &alternatives[0], nil } -type AccountUsersPreviewInterface interface { +type AccountUsersInterface interface { // Create a new user. // @@ -480,7 +480,7 @@ type AccountUsersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator.
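Account users have the same SCIM shape. A sketch of creating one, with the import path assumed as above and the email address a placeholder:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	users, err := iam.NewAccountUsersClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// UserName is the SCIM identifier that the map helpers below key on.
	u, err := users.Create(ctx, iam.User{UserName: "new.hire@example.com"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created user %s with id %s", u.UserName, u.Id)
}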
ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) - // UserUserNameToIdMap calls [AccountUsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. + // UserUserNameToIdMap calls [AccountUsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. // @@ -489,7 +489,7 @@ type AccountUsersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. UserUserNameToIdMap(ctx context.Context, request ListAccountUsersRequest) (map[string]string, error) - // GetByUserName calls [AccountUsersPreviewAPI.UserUserNameToIdMap] and returns a single [User]. + // GetByUserName calls [AccountUsersAPI.UserUserNameToIdMap] and returns a single [User]. // // Returns an error if there's more than one [User] with the same .UserName. // @@ -510,9 +510,9 @@ type AccountUsersPreviewInterface interface { Update(ctx context.Context, request User) error } -func NewAccountUsersPreview(client *client.DatabricksClient) *AccountUsersPreviewAPI { - return &AccountUsersPreviewAPI{ - accountUsersPreviewImpl: accountUsersPreviewImpl{ +func NewAccountUsers(client *client.DatabricksClient) *AccountUsersAPI { + return &AccountUsersAPI{ + accountUsersImpl: accountUsersImpl{ client: client, }, } @@ -529,16 +529,16 @@ func NewAccountUsersPreview(client *client.DatabricksClient) *AccountUsersPrevie // provider and that user’s account will also be removed from the Databricks // account. This ensures a consistent offboarding process and prevents // unauthorized users from accessing sensitive data. -type AccountUsersPreviewAPI struct { - accountUsersPreviewImpl +type AccountUsersAPI struct { + accountUsersImpl } // Delete a user. // // Deletes a user. Deleting a user from a Databricks account also removes // objects associated with the user. -func (a *AccountUsersPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.accountUsersPreviewImpl.Delete(ctx, DeleteAccountUserRequest{ +func (a *AccountUsersAPI) DeleteById(ctx context.Context, id string) error { + return a.accountUsersImpl.Delete(ctx, DeleteAccountUserRequest{ Id: id, }) } @@ -546,20 +546,20 @@ func (a *AccountUsersPreviewAPI) DeleteById(ctx context.Context, id string) erro // Get user details. // // Gets information for a specific user in the Databricks account. -func (a *AccountUsersPreviewAPI) GetById(ctx context.Context, id string) (*User, error) { - return a.accountUsersPreviewImpl.Get(ctx, GetAccountUserRequest{ +func (a *AccountUsersAPI) GetById(ctx context.Context, id string) (*User, error) { + return a.accountUsersImpl.Get(ctx, GetAccountUserRequest{ Id: id, }) } -// UserUserNameToIdMap calls [AccountUsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. +// UserUserNameToIdMap calls [AccountUsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. // // Note: All [User] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator.
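The map helper materializes the full user list before keying it, so it suits small directories. A sketch with the caveats the comments above spell out, import path assumed as before:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	users, err := iam.NewAccountUsersClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Loads all users into memory; errors if two users share a UserName.
	byName, err := users.UserUserNameToIdMap(ctx, iam.ListAccountUsersRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for userName, id := range byName {
		log.Printf("%s -> %s", userName, id)
	}
}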
-func (a *AccountUsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, request ListAccountUsersRequest) (map[string]string, error) { +func (a *AccountUsersAPI) UserUserNameToIdMap(ctx context.Context, request ListAccountUsersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -577,14 +577,14 @@ func (a *AccountUsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, reques return mapping, nil } -// GetByUserName calls [AccountUsersPreviewAPI.UserUserNameToIdMap] and returns a single [User]. +// GetByUserName calls [AccountUsersAPI.UserUserNameToIdMap] and returns a single [User]. // // Returns an error if there's more than one [User] with the same .UserName. // // Note: All [User] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountUsersPreviewAPI) GetByUserName(ctx context.Context, name string) (*User, error) { +func (a *AccountUsersAPI) GetByUserName(ctx context.Context, name string) (*User, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListAccountUsersRequest{}) if err != nil { @@ -605,7 +605,7 @@ func (a *AccountUsersPreviewAPI) GetByUserName(ctx context.Context, name string) return &alternatives[0], nil } -type CurrentUserPreviewInterface interface { +type CurrentUserInterface interface { // Get current user info. // @@ -613,9 +613,9 @@ type CurrentUserPreviewInterface interface { Me(ctx context.Context) (*User, error) } -func NewCurrentUserPreview(client *client.DatabricksClient) *CurrentUserPreviewAPI { - return &CurrentUserPreviewAPI{ - currentUserPreviewImpl: currentUserPreviewImpl{ +func NewCurrentUser(client *client.DatabricksClient) *CurrentUserAPI { + return &CurrentUserAPI{ + currentUserImpl: currentUserImpl{ client: client, }, } @@ -623,11 +623,11 @@ func NewCurrentUserPreview(client *client.DatabricksClient) *CurrentUserPreviewA // This API allows retrieving information about the currently authenticated user or // service principal. -type CurrentUserPreviewAPI struct { - currentUserPreviewImpl +type CurrentUserAPI struct { + currentUserImpl } -type GroupsPreviewInterface interface { +type GroupsInterface interface { // Create a new group. // @@ -669,7 +669,7 @@ type GroupsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) - // GroupDisplayNameToIdMap calls [GroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. + // GroupDisplayNameToIdMap calls [GroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. // @@ -678,7 +678,7 @@ type GroupsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. GroupDisplayNameToIdMap(ctx context.Context, request ListGroupsRequest) (map[string]string, error) - // GetByDisplayName calls [GroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. + // GetByDisplayName calls [GroupsAPI.GroupDisplayNameToIdMap] and returns a single [Group]. // // Returns an error if there's more than one [Group] with the same .DisplayName.
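CurrentUser exposes a single method, which makes it a convenient smoke test that credentials still resolve after the rename. A sketch under the same import assumption:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	cu, err := iam.NewCurrentUserClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	me, err := cu.Me(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("authenticated as %s", me.UserName)
}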
// @@ -698,9 +698,9 @@ type GroupsPreviewInterface interface { Update(ctx context.Context, request Group) error } -func NewGroupsPreview(client *client.DatabricksClient) *GroupsPreviewAPI { - return &GroupsPreviewAPI{ - groupsPreviewImpl: groupsPreviewImpl{ +func NewGroups(client *client.DatabricksClient) *GroupsAPI { + return &GroupsAPI{ + groupsImpl: groupsImpl{ client: client, }, } @@ -713,15 +713,15 @@ func NewGroupsPreview(client *client.DatabricksClient) *GroupsPreviewAPI { // policies in Unity Catalog to groups, instead of to users individually. All // Databricks workspace identities can be assigned as members of groups, and // members inherit permissions that are assigned to their group. -type GroupsPreviewAPI struct { - groupsPreviewImpl +type GroupsAPI struct { + groupsImpl } // Delete a group. // // Deletes a group from the Databricks workspace. -func (a *GroupsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.groupsPreviewImpl.Delete(ctx, DeleteGroupRequest{ +func (a *GroupsAPI) DeleteById(ctx context.Context, id string) error { + return a.groupsImpl.Delete(ctx, DeleteGroupRequest{ Id: id, }) } @@ -729,20 +729,20 @@ func (a *GroupsPreviewAPI) DeleteById(ctx context.Context, id string) error { // Get group details. // // Gets the information for a specific group in the Databricks workspace. -func (a *GroupsPreviewAPI) GetById(ctx context.Context, id string) (*Group, error) { - return a.groupsPreviewImpl.Get(ctx, GetGroupRequest{ +func (a *GroupsAPI) GetById(ctx context.Context, id string) (*Group, error) { + return a.groupsImpl.Get(ctx, GetGroupRequest{ Id: id, }) } -// GroupDisplayNameToIdMap calls [GroupsPreviewAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. +// GroupDisplayNameToIdMap calls [GroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. // // Note: All [Group] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *GroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListGroupsRequest) (map[string]string, error) { +func (a *GroupsAPI) GroupDisplayNameToIdMap(ctx context.Context, request ListGroupsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -760,14 +760,14 @@ func (a *GroupsPreviewAPI) GroupDisplayNameToIdMap(ctx context.Context, request return mapping, nil } -// GetByDisplayName calls [GroupsPreviewAPI.GroupDisplayNameToIdMap] and returns a single [Group]. +// GetByDisplayName calls [GroupsAPI.GroupDisplayNameToIdMap] and returns a single [Group]. // // Returns an error if there's more than one [Group] with the same .DisplayName. // // Note: All [Group] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
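Workspace-level groups mirror the account-level API; only the client constructor differs. A sketch of building the display-name index, with the group name illustrative:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	groups, err := iam.NewGroupsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Loads every group into memory and errors on duplicate DisplayNames.
	byName, err := groups.GroupDisplayNameToIdMap(ctx, iam.ListGroupsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("admins group id: %s", byName["admins"])
}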
-func (a *GroupsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) { +func (a *GroupsAPI) GetByDisplayName(ctx context.Context, name string) (*Group, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListGroupsRequest{}) if err != nil { @@ -788,15 +788,15 @@ func (a *GroupsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (* return &alternatives[0], nil } -type PermissionMigrationPreviewInterface interface { +type PermissionMigrationInterface interface { // Migrate Permissions. MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error) } -func NewPermissionMigrationPreview(client *client.DatabricksClient) *PermissionMigrationPreviewAPI { - return &PermissionMigrationPreviewAPI{ - permissionMigrationPreviewImpl: permissionMigrationPreviewImpl{ +func NewPermissionMigration(client *client.DatabricksClient) *PermissionMigrationAPI { + return &PermissionMigrationAPI{ + permissionMigrationImpl: permissionMigrationImpl{ client: client, }, } @@ -804,11 +804,11 @@ func NewPermissionMigrationPreview(client *client.DatabricksClient) *PermissionM // APIs for migrating ACL permissions, used only by the ucx tool: // https://github.com/databrickslabs/ucx -type PermissionMigrationPreviewAPI struct { - permissionMigrationPreviewImpl +type PermissionMigrationAPI struct { + permissionMigrationImpl } -type PermissionsPreviewInterface interface { +type PermissionsInterface interface { // Get object permissions. // @@ -846,9 +846,9 @@ type PermissionsPreviewInterface interface { Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) } -func NewPermissionsPreview(client *client.DatabricksClient) *PermissionsPreviewAPI { - return &PermissionsPreviewAPI{ - permissionsPreviewImpl: permissionsPreviewImpl{ +func NewPermissions(client *client.DatabricksClient) *PermissionsAPI { + return &PermissionsAPI{ + permissionsImpl: permissionsImpl{ client: client, }, } @@ -908,16 +908,16 @@ func NewPermissionsPreview(client *client.DatabricksClient) *PermissionsPreviewA // Access Control Proxy](:service:accountaccesscontrolproxy)**. // // [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html -type PermissionsPreviewAPI struct { - permissionsPreviewImpl +type PermissionsAPI struct { + permissionsImpl } // Get object permissions. // // Gets the permissions of an object. Objects can inherit permissions from their // parent objects or root object. -func (a *PermissionsPreviewAPI) GetByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*ObjectPermissions, error) { - return a.permissionsPreviewImpl.Get(ctx, GetPermissionRequest{ +func (a *PermissionsAPI) GetByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*ObjectPermissions, error) { + return a.permissionsImpl.Get(ctx, GetPermissionRequest{ RequestObjectType: requestObjectType, RequestObjectId: requestObjectId, }) @@ -926,14 +926,14 @@ func (a *PermissionsPreviewAPI) GetByRequestObjectTypeAndRequestObjectId(ctx con // Get object permission levels. // // Gets the permission levels that a user can have on an object.
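The permissions wrapper keeps its two-argument convenience getter. A sketch of reading a cluster's permissions; the "clusters" object type and the ID are illustrative and come from the Permissions API documentation rather than this patch:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	perms, err := iam.NewPermissionsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	obj, err := perms.GetByRequestObjectTypeAndRequestObjectId(ctx, "clusters", "1234-567890-ab123cd")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("permissions: %+v", obj)
}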
-func (a *PermissionsPreviewAPI) GetPermissionLevelsByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*GetPermissionLevelsResponse, error) { - return a.permissionsPreviewImpl.GetPermissionLevels(ctx, GetPermissionLevelsRequest{ +func (a *PermissionsAPI) GetPermissionLevelsByRequestObjectTypeAndRequestObjectId(ctx context.Context, requestObjectType string, requestObjectId string) (*GetPermissionLevelsResponse, error) { + return a.permissionsImpl.GetPermissionLevels(ctx, GetPermissionLevelsRequest{ RequestObjectType: requestObjectType, RequestObjectId: requestObjectId, }) } -type ServicePrincipalsPreviewInterface interface { +type ServicePrincipalsInterface interface { // Create a service principal. // @@ -976,7 +976,7 @@ type ServicePrincipalsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) - // ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. + // ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // @@ -985,7 +985,7 @@ type ServicePrincipalsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListServicePrincipalsRequest) (map[string]string, error) - // GetByDisplayName calls [ServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. + // GetByDisplayName calls [ServicePrincipalsAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // @@ -1008,9 +1008,9 @@ type ServicePrincipalsPreviewInterface interface { Update(ctx context.Context, request ServicePrincipal) error } -func NewServicePrincipalsPreview(client *client.DatabricksClient) *ServicePrincipalsPreviewAPI { - return &ServicePrincipalsPreviewAPI{ - servicePrincipalsPreviewImpl: servicePrincipalsPreviewImpl{ +func NewServicePrincipals(client *client.DatabricksClient) *ServicePrincipalsAPI { + return &ServicePrincipalsAPI{ + servicePrincipalsImpl: servicePrincipalsImpl{ client: client, }, } @@ -1022,15 +1022,15 @@ func NewServicePrincipalsPreview(client *client.DatabricksClient) *ServicePrinci // on production data run with service principals, interactive users do not need // any write, delete, or modify privileges in production. This eliminates the // risk of a user overwriting production data by accident. -type ServicePrincipalsPreviewAPI struct { - servicePrincipalsPreviewImpl +type ServicePrincipalsAPI struct { + servicePrincipalsImpl } // Delete a service principal. // // Delete a single service principal in the Databricks workspace. 
-func (a *ServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.servicePrincipalsPreviewImpl.Delete(ctx, DeleteServicePrincipalRequest{ +func (a *ServicePrincipalsAPI) DeleteById(ctx context.Context, id string) error { + return a.servicePrincipalsImpl.Delete(ctx, DeleteServicePrincipalRequest{ Id: id, }) } @@ -1039,20 +1039,20 @@ func (a *ServicePrincipalsPreviewAPI) DeleteById(ctx context.Context, id string) // // Gets the details for a single service principal defined in the Databricks // workspace. -func (a *ServicePrincipalsPreviewAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) { - return a.servicePrincipalsPreviewImpl.Get(ctx, GetServicePrincipalRequest{ +func (a *ServicePrincipalsAPI) GetById(ctx context.Context, id string) (*ServicePrincipal, error) { + return a.servicePrincipalsImpl.Get(ctx, GetServicePrincipalRequest{ Id: id, }) } -// ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsPreviewAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. +// ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // // Note: All [ServicePrincipal] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListServicePrincipalsRequest) (map[string]string, error) { +func (a *ServicePrincipalsAPI) ServicePrincipalDisplayNameToIdMap(ctx context.Context, request ListServicePrincipalsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1070,14 +1070,14 @@ func (a *ServicePrincipalsPreviewAPI) ServicePrincipalDisplayNameToIdMap(ctx con return mapping, nil } -// GetByDisplayName calls [ServicePrincipalsPreviewAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. +// GetByDisplayName calls [ServicePrincipalsAPI.ServicePrincipalDisplayNameToIdMap] and returns a single [ServicePrincipal]. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. // // Note: All [ServicePrincipal] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) { +func (a *ServicePrincipalsAPI) GetByDisplayName(ctx context.Context, name string) (*ServicePrincipal, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListServicePrincipalsRequest{}) if err != nil { @@ -1098,7 +1098,7 @@ func (a *ServicePrincipalsPreviewAPI) GetByDisplayName(ctx context.Context, name return &alternatives[0], nil } -type UsersPreviewInterface interface { +type UsersInterface interface { // Create a new user. // @@ -1153,7 +1153,7 @@ type UsersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator.
ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) - // UserUserNameToIdMap calls [UsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. + // UserUserNameToIdMap calls [UsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. // @@ -1162,7 +1162,7 @@ type UsersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. UserUserNameToIdMap(ctx context.Context, request ListUsersRequest) (map[string]string, error) - // GetByUserName calls [UsersPreviewAPI.UserUserNameToIdMap] and returns a single [User]. + // GetByUserName calls [UsersAPI.UserUserNameToIdMap] and returns a single [User]. // // Returns an error if there's more than one [User] with the same .UserName. // @@ -1196,9 +1196,9 @@ type UsersPreviewInterface interface { UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) } -func NewUsersPreview(client *client.DatabricksClient) *UsersPreviewAPI { - return &UsersPreviewAPI{ - usersPreviewImpl: usersPreviewImpl{ +func NewUsers(client *client.DatabricksClient) *UsersAPI { + return &UsersAPI{ + usersImpl: usersImpl{ client: client, }, } @@ -1215,16 +1215,16 @@ func NewUsersPreview(client *client.DatabricksClient) *UsersPreviewAPI { // identity provider and that user’s account will also be removed from // the Databricks workspace. This ensures a consistent offboarding process and // prevents unauthorized users from accessing sensitive data. -type UsersPreviewAPI struct { - usersPreviewImpl +type UsersAPI struct { + usersImpl } // Delete a user. // // Deletes a user. Deleting a user from a Databricks workspace also removes // objects associated with the user. -func (a *UsersPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.usersPreviewImpl.Delete(ctx, DeleteUserRequest{ +func (a *UsersAPI) DeleteById(ctx context.Context, id string) error { + return a.usersImpl.Delete(ctx, DeleteUserRequest{ Id: id, }) } @@ -1232,20 +1232,20 @@ func (a *UsersPreviewAPI) DeleteById(ctx context.Context, id string) error { // Get user details. // // Gets information for a specific user in the Databricks workspace. -func (a *UsersPreviewAPI) GetById(ctx context.Context, id string) (*User, error) { - return a.usersPreviewImpl.Get(ctx, GetUserRequest{ +func (a *UsersAPI) GetById(ctx context.Context, id string) (*User, error) { + return a.usersImpl.Get(ctx, GetUserRequest{ Id: id, }) } -// UserUserNameToIdMap calls [UsersPreviewAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. +// UserUserNameToIdMap calls [UsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. // // Note: All [User] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator.
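The offboarding flow the Users doc comment describes reduces to two calls at workspace level. A sketch, with a placeholder address:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	users, err := iam.NewUsersClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// GetByUserName errors on zero or duplicate matches.
	u, err := users.GetByUserName(ctx, "departed.user@example.com")
	if err != nil {
		log.Fatal(err)
	}
	// Deleting the workspace user also removes objects associated with it.
	if err := users.DeleteById(ctx, u.Id); err != nil {
		log.Fatal(err)
	}
}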
-func (a *UsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, request ListUsersRequest) (map[string]string, error) { +func (a *UsersAPI) UserUserNameToIdMap(ctx context.Context, request ListUsersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1263,14 +1263,14 @@ func (a *UsersPreviewAPI) UserUserNameToIdMap(ctx context.Context, request ListU return mapping, nil } -// GetByUserName calls [UsersPreviewAPI.UserUserNameToIdMap] and returns a single [User]. +// GetByUserName calls [UsersAPI.UserUserNameToIdMap] and returns a single [User]. // // Returns an error if there's more than one [User] with the same .UserName. // // Note: All [User] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *UsersPreviewAPI) GetByUserName(ctx context.Context, name string) (*User, error) { +func (a *UsersAPI) GetByUserName(ctx context.Context, name string) (*User, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListUsersRequest{}) if err != nil { @@ -1291,7 +1291,7 @@ func (a *UsersPreviewAPI) GetByUserName(ctx context.Context, name string) (*User return &alternatives[0], nil } -type WorkspaceAssignmentPreviewInterface interface { +type WorkspaceAssignmentInterface interface { // Delete permissions assignment. // @@ -1346,9 +1346,9 @@ type WorkspaceAssignmentPreviewInterface interface { Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) } -func NewWorkspaceAssignmentPreview(client *client.DatabricksClient) *WorkspaceAssignmentPreviewAPI { - return &WorkspaceAssignmentPreviewAPI{ - workspaceAssignmentPreviewImpl: workspaceAssignmentPreviewImpl{ +func NewWorkspaceAssignment(client *client.DatabricksClient) *WorkspaceAssignmentAPI { + return &WorkspaceAssignmentAPI{ + workspaceAssignmentImpl: workspaceAssignmentImpl{ client: client, }, } @@ -1356,16 +1356,16 @@ func NewWorkspaceAssignmentPreview(client *client.DatabricksClient) *WorkspaceAs // The Workspace Permission Assignment API allows you to manage workspace // permissions for principals in your account. -type WorkspaceAssignmentPreviewAPI struct { - workspaceAssignmentPreviewImpl +type WorkspaceAssignmentAPI struct { + workspaceAssignmentImpl } // Delete permissions assignment. // // Deletes the workspace permissions assignment in a given account and workspace // for the specified principal. -func (a *WorkspaceAssignmentPreviewAPI) DeleteByWorkspaceIdAndPrincipalId(ctx context.Context, workspaceId int64, principalId int64) error { - return a.workspaceAssignmentPreviewImpl.Delete(ctx, DeleteWorkspaceAssignmentRequest{ +func (a *WorkspaceAssignmentAPI) DeleteByWorkspaceIdAndPrincipalId(ctx context.Context, workspaceId int64, principalId int64) error { + return a.workspaceAssignmentImpl.Delete(ctx, DeleteWorkspaceAssignmentRequest{ WorkspaceId: workspaceId, PrincipalId: principalId, }) @@ -1375,8 +1375,8 @@ func (a *WorkspaceAssignmentPreviewAPI) DeleteByWorkspaceIdAndPrincipalId(ctx co // // Get an array of workspace permissions for the specified account and // workspace. 
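Workspace assignment keys on numeric workspace and principal IDs rather than the SCIM string IDs used elsewhere in this package. A sketch of revoking one assignment, with placeholder IDs:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	wa, err := iam.NewWorkspaceAssignmentClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder IDs; both parameters are int64.
	const workspaceID, principalID int64 = 123456789, 987654321
	if err := wa.DeleteByWorkspaceIdAndPrincipalId(ctx, workspaceID, principalID); err != nil {
		log.Fatal(err)
	}
}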
-func (a *WorkspaceAssignmentPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*WorkspacePermissions, error) { - return a.workspaceAssignmentPreviewImpl.Get(ctx, GetWorkspaceAssignmentRequest{ +func (a *WorkspaceAssignmentAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*WorkspacePermissions, error) { + return a.workspaceAssignmentImpl.Get(ctx, GetWorkspaceAssignmentRequest{ WorkspaceId: workspaceId, }) } @@ -1385,8 +1385,8 @@ func (a *WorkspaceAssignmentPreviewAPI) GetByWorkspaceId(ctx context.Context, wo // // Get the permission assignments for the specified Databricks account and // Databricks workspace. -func (a *WorkspaceAssignmentPreviewAPI) ListByWorkspaceId(ctx context.Context, workspaceId int64) (*PermissionAssignments, error) { - return a.workspaceAssignmentPreviewImpl.internalList(ctx, ListWorkspaceAssignmentRequest{ +func (a *WorkspaceAssignmentAPI) ListByWorkspaceId(ctx context.Context, workspaceId int64) (*PermissionAssignments, error) { + return a.workspaceAssignmentImpl.internalList(ctx, ListWorkspaceAssignmentRequest{ WorkspaceId: workspaceId, }) } diff --git a/iam/v2preview/client.go b/iam/v2preview/client.go index e136a8a59..55cb39bdd 100755 --- a/iam/v2preview/client.go +++ b/iam/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type AccessControlPreviewClient struct { - AccessControlPreviewInterface +type AccessControlClient struct { + AccessControlInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAccessControlPreviewClient(cfg *config.Config) (*AccessControlPreviewClient, error) { +func NewAccessControlClient(cfg *config.Config) (*AccessControlClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewAccessControlPreviewClient(cfg *config.Config) (*AccessControlPreviewCli return nil, err } - return &AccessControlPreviewClient{ - Config: cfg, - apiClient: apiClient, - AccessControlPreviewInterface: NewAccessControlPreview(databricksClient), + return &AccessControlClient{ + Config: cfg, + apiClient: apiClient, + AccessControlInterface: NewAccessControl(databricksClient), }, nil } -type AccountAccessControlPreviewClient struct { - AccountAccessControlPreviewInterface +type AccountAccessControlClient struct { + AccountAccessControlInterface Config *config.Config } -func NewAccountAccessControlPreviewClient(cfg *config.Config) (*AccountAccessControlPreviewClient, error) { +func NewAccountAccessControlClient(cfg *config.Config) (*AccountAccessControlClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -68,19 +68,19 @@ func NewAccountAccessControlPreviewClient(cfg *config.Config) (*AccountAccessCon return nil, err } - return &AccountAccessControlPreviewClient{ - Config: cfg, - AccountAccessControlPreviewInterface: NewAccountAccessControlPreview(apiClient), + return &AccountAccessControlClient{ + Config: cfg, + AccountAccessControlInterface: NewAccountAccessControl(apiClient), }, nil } -type AccountAccessControlProxyPreviewClient struct { - AccountAccessControlProxyPreviewInterface +type AccountAccessControlProxyClient struct { + AccountAccessControlProxyInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAccountAccessControlProxyPreviewClient(cfg *config.Config) (*AccountAccessControlProxyPreviewClient, error) { +func NewAccountAccessControlProxyClient(cfg *config.Config) (*AccountAccessControlProxyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -101,20 +101,20 @@ func 
NewAccountAccessControlProxyPreviewClient(cfg *config.Config) (*AccountAcce return nil, err } - return &AccountAccessControlProxyPreviewClient{ - Config: cfg, - apiClient: apiClient, - AccountAccessControlProxyPreviewInterface: NewAccountAccessControlProxyPreview(databricksClient), + return &AccountAccessControlProxyClient{ + Config: cfg, + apiClient: apiClient, + AccountAccessControlProxyInterface: NewAccountAccessControlProxy(databricksClient), }, nil } -type AccountGroupsPreviewClient struct { - AccountGroupsPreviewInterface +type AccountGroupsClient struct { + AccountGroupsInterface Config *config.Config } -func NewAccountGroupsPreviewClient(cfg *config.Config) (*AccountGroupsPreviewClient, error) { +func NewAccountGroupsClient(cfg *config.Config) (*AccountGroupsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -132,19 +132,19 @@ func NewAccountGroupsPreviewClient(cfg *config.Config) (*AccountGroupsPreviewCli return nil, err } - return &AccountGroupsPreviewClient{ - Config: cfg, - AccountGroupsPreviewInterface: NewAccountGroupsPreview(apiClient), + return &AccountGroupsClient{ + Config: cfg, + AccountGroupsInterface: NewAccountGroups(apiClient), }, nil } -type AccountServicePrincipalsPreviewClient struct { - AccountServicePrincipalsPreviewInterface +type AccountServicePrincipalsClient struct { + AccountServicePrincipalsInterface Config *config.Config } -func NewAccountServicePrincipalsPreviewClient(cfg *config.Config) (*AccountServicePrincipalsPreviewClient, error) { +func NewAccountServicePrincipalsClient(cfg *config.Config) (*AccountServicePrincipalsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -162,19 +162,19 @@ func NewAccountServicePrincipalsPreviewClient(cfg *config.Config) (*AccountServi return nil, err } - return &AccountServicePrincipalsPreviewClient{ - Config: cfg, - AccountServicePrincipalsPreviewInterface: NewAccountServicePrincipalsPreview(apiClient), + return &AccountServicePrincipalsClient{ + Config: cfg, + AccountServicePrincipalsInterface: NewAccountServicePrincipals(apiClient), }, nil } -type AccountUsersPreviewClient struct { - AccountUsersPreviewInterface +type AccountUsersClient struct { + AccountUsersInterface Config *config.Config } -func NewAccountUsersPreviewClient(cfg *config.Config) (*AccountUsersPreviewClient, error) { +func NewAccountUsersClient(cfg *config.Config) (*AccountUsersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -192,19 +192,19 @@ func NewAccountUsersPreviewClient(cfg *config.Config) (*AccountUsersPreviewClien return nil, err } - return &AccountUsersPreviewClient{ - Config: cfg, - AccountUsersPreviewInterface: NewAccountUsersPreview(apiClient), + return &AccountUsersClient{ + Config: cfg, + AccountUsersInterface: NewAccountUsers(apiClient), }, nil } -type CurrentUserPreviewClient struct { - CurrentUserPreviewInterface +type CurrentUserClient struct { + CurrentUserInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCurrentUserPreviewClient(cfg *config.Config) (*CurrentUserPreviewClient, error) { +func NewCurrentUserClient(cfg *config.Config) (*CurrentUserClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -225,20 +225,20 @@ func NewCurrentUserPreviewClient(cfg *config.Config) (*CurrentUserPreviewClient, return nil, err } - return &CurrentUserPreviewClient{ - Config: cfg, - apiClient: apiClient, - CurrentUserPreviewInterface: NewCurrentUserPreview(databricksClient), + return &CurrentUserClient{ + Config: cfg, + apiClient: apiClient, + CurrentUserInterface: 
NewCurrentUser(databricksClient), }, nil } -type GroupsPreviewClient struct { - GroupsPreviewInterface +type GroupsClient struct { + GroupsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewGroupsPreviewClient(cfg *config.Config) (*GroupsPreviewClient, error) { +func NewGroupsClient(cfg *config.Config) (*GroupsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -259,20 +259,20 @@ func NewGroupsPreviewClient(cfg *config.Config) (*GroupsPreviewClient, error) { return nil, err } - return &GroupsPreviewClient{ - Config: cfg, - apiClient: apiClient, - GroupsPreviewInterface: NewGroupsPreview(databricksClient), + return &GroupsClient{ + Config: cfg, + apiClient: apiClient, + GroupsInterface: NewGroups(databricksClient), }, nil } -type PermissionMigrationPreviewClient struct { - PermissionMigrationPreviewInterface +type PermissionMigrationClient struct { + PermissionMigrationInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPermissionMigrationPreviewClient(cfg *config.Config) (*PermissionMigrationPreviewClient, error) { +func NewPermissionMigrationClient(cfg *config.Config) (*PermissionMigrationClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -293,20 +293,20 @@ func NewPermissionMigrationPreviewClient(cfg *config.Config) (*PermissionMigrati return nil, err } - return &PermissionMigrationPreviewClient{ - Config: cfg, - apiClient: apiClient, - PermissionMigrationPreviewInterface: NewPermissionMigrationPreview(databricksClient), + return &PermissionMigrationClient{ + Config: cfg, + apiClient: apiClient, + PermissionMigrationInterface: NewPermissionMigration(databricksClient), }, nil } -type PermissionsPreviewClient struct { - PermissionsPreviewInterface +type PermissionsClient struct { + PermissionsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPermissionsPreviewClient(cfg *config.Config) (*PermissionsPreviewClient, error) { +func NewPermissionsClient(cfg *config.Config) (*PermissionsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -327,20 +327,20 @@ func NewPermissionsPreviewClient(cfg *config.Config) (*PermissionsPreviewClient, return nil, err } - return &PermissionsPreviewClient{ - Config: cfg, - apiClient: apiClient, - PermissionsPreviewInterface: NewPermissionsPreview(databricksClient), + return &PermissionsClient{ + Config: cfg, + apiClient: apiClient, + PermissionsInterface: NewPermissions(databricksClient), }, nil } -type ServicePrincipalsPreviewClient struct { - ServicePrincipalsPreviewInterface +type ServicePrincipalsClient struct { + ServicePrincipalsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewServicePrincipalsPreviewClient(cfg *config.Config) (*ServicePrincipalsPreviewClient, error) { +func NewServicePrincipalsClient(cfg *config.Config) (*ServicePrincipalsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -361,20 +361,20 @@ func NewServicePrincipalsPreviewClient(cfg *config.Config) (*ServicePrincipalsPr return nil, err } - return &ServicePrincipalsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ServicePrincipalsPreviewInterface: NewServicePrincipalsPreview(databricksClient), + return &ServicePrincipalsClient{ + Config: cfg, + apiClient: apiClient, + ServicePrincipalsInterface: NewServicePrincipals(databricksClient), }, nil } -type UsersPreviewClient struct { - UsersPreviewInterface +type UsersClient struct { + UsersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewUsersPreviewClient(cfg *config.Config) 
(*UsersPreviewClient, error) { +func NewUsersClient(cfg *config.Config) (*UsersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -395,20 +395,20 @@ func NewUsersPreviewClient(cfg *config.Config) (*UsersPreviewClient, error) { return nil, err } - return &UsersPreviewClient{ - Config: cfg, - apiClient: apiClient, - UsersPreviewInterface: NewUsersPreview(databricksClient), + return &UsersClient{ + Config: cfg, + apiClient: apiClient, + UsersInterface: NewUsers(databricksClient), }, nil } -type WorkspaceAssignmentPreviewClient struct { - WorkspaceAssignmentPreviewInterface +type WorkspaceAssignmentClient struct { + WorkspaceAssignmentInterface Config *config.Config } -func NewWorkspaceAssignmentPreviewClient(cfg *config.Config) (*WorkspaceAssignmentPreviewClient, error) { +func NewWorkspaceAssignmentClient(cfg *config.Config) (*WorkspaceAssignmentClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -426,8 +426,8 @@ func NewWorkspaceAssignmentPreviewClient(cfg *config.Config) (*WorkspaceAssignme return nil, err } - return &WorkspaceAssignmentPreviewClient{ - Config: cfg, - WorkspaceAssignmentPreviewInterface: NewWorkspaceAssignmentPreview(apiClient), + return &WorkspaceAssignmentClient{ + Config: cfg, + WorkspaceAssignmentInterface: NewWorkspaceAssignment(apiClient), }, nil } diff --git a/iam/v2preview/impl.go b/iam/v2preview/impl.go index afda9e289..f177fc189 100755 --- a/iam/v2preview/impl.go +++ b/iam/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just AccessControlPreview API methods -type accessControlPreviewImpl struct { +// unexported type that holds implementations of just AccessControl API methods +type accessControlImpl struct { client *client.DatabricksClient } -func (a *accessControlPreviewImpl) CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) { +func (a *accessControlImpl) CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) { var checkPolicyResponse CheckPolicyResponse path := "/api/2.0preview/access-control/check-policy-v2" queryParams := make(map[string]any) @@ -27,12 +27,12 @@ func (a *accessControlPreviewImpl) CheckPolicy(ctx context.Context, request Chec return &checkPolicyResponse, err } -// unexported type that holds implementations of just AccountAccessControlPreview API methods -type accountAccessControlPreviewImpl struct { +// unexported type that holds implementations of just AccountAccessControl API methods +type accountAccessControlImpl struct { client *client.DatabricksClient } -func (a *accountAccessControlPreviewImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { +func (a *accountAccessControlImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/assignable-roles", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -42,7 +42,7 @@ func (a *accountAccessControlPreviewImpl) GetAssignableRolesForResource(ctx cont return &getAssignableRolesForResourceResponse, err } -func (a *accountAccessControlPreviewImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { 
+func (a *accountAccessControlImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -52,7 +52,7 @@ func (a *accountAccessControlPreviewImpl) GetRuleSet(ctx context.Context, reques return &ruleSetResponse, err } -func (a *accountAccessControlPreviewImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { +func (a *accountAccessControlImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := fmt.Sprintf("/api/2.0preview/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -63,12 +63,12 @@ func (a *accountAccessControlPreviewImpl) UpdateRuleSet(ctx context.Context, req return &ruleSetResponse, err } -// unexported type that holds implementations of just AccountAccessControlProxyPreview API methods -type accountAccessControlProxyPreviewImpl struct { +// unexported type that holds implementations of just AccountAccessControlProxy API methods +type accountAccessControlProxyImpl struct { client *client.DatabricksClient } -func (a *accountAccessControlProxyPreviewImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { +func (a *accountAccessControlProxyImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse path := "/api/2.0preview/preview/accounts/access-control/assignable-roles" queryParams := make(map[string]any) @@ -78,7 +78,7 @@ func (a *accountAccessControlProxyPreviewImpl) GetAssignableRolesForResource(ctx return &getAssignableRolesForResourceResponse, err } -func (a *accountAccessControlProxyPreviewImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { +func (a *accountAccessControlProxyImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := "/api/2.0preview/preview/accounts/access-control/rule-sets" queryParams := make(map[string]any) @@ -88,7 +88,7 @@ func (a *accountAccessControlProxyPreviewImpl) GetRuleSet(ctx context.Context, r return &ruleSetResponse, err } -func (a *accountAccessControlProxyPreviewImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { +func (a *accountAccessControlProxyImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := "/api/2.0preview/preview/accounts/access-control/rule-sets" queryParams := make(map[string]any) @@ -99,12 +99,12 @@ func (a *accountAccessControlProxyPreviewImpl) UpdateRuleSet(ctx context.Context return &ruleSetResponse, err } -// unexported type that holds implementations of just AccountGroupsPreview API methods -type accountGroupsPreviewImpl struct { +// unexported type that holds implementations of just AccountGroups API methods +type accountGroupsImpl struct { client *client.DatabricksClient } -func (a *accountGroupsPreviewImpl) Create(ctx context.Context, request Group) (*Group, error) { +func (a 
*accountGroupsImpl) Create(ctx context.Context, request Group) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -115,7 +115,7 @@ func (a *accountGroupsPreviewImpl) Create(ctx context.Context, request Group) (* return &group, err } -func (a *accountGroupsPreviewImpl) Delete(ctx context.Context, request DeleteAccountGroupRequest) error { +func (a *accountGroupsImpl) Delete(ctx context.Context, request DeleteAccountGroupRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -124,7 +124,7 @@ func (a *accountGroupsPreviewImpl) Delete(ctx context.Context, request DeleteAcc return err } -func (a *accountGroupsPreviewImpl) Get(ctx context.Context, request GetAccountGroupRequest) (*Group, error) { +func (a *accountGroupsImpl) Get(ctx context.Context, request GetAccountGroupRequest) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -137,7 +137,7 @@ func (a *accountGroupsPreviewImpl) Get(ctx context.Context, request GetAccountGr // List group details. // // Gets all details of the groups associated with the Databricks account. -func (a *accountGroupsPreviewImpl) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] { +func (a *accountGroupsImpl) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -173,12 +173,12 @@ func (a *accountGroupsPreviewImpl) List(ctx context.Context, request ListAccount // List group details. // // Gets all details of the groups associated with the Databricks account. 
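Because the impl wraps SCIM's 1-based StartIndex paging behind a listing.Iterator, callers can stream results instead of buffering them all with ListAll. A sketch, assuming the public interface also re-exports List as in the current SDK (this patch shows only the impl side):

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	groups, err := iam.NewAccountGroupsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// HasNext/Next fetch SCIM pages lazily, advancing StartIndex internally.
	it := groups.List(ctx, iam.ListAccountGroupsRequest{})
	for it.HasNext(ctx) {
		group, err := it.Next(ctx)
		if err != nil {
			log.Fatal(err)
		}
		log.Println(group.DisplayName)
	}
}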
-func (a *accountGroupsPreviewImpl) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) { +func (a *accountGroupsImpl) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) { iterator := a.List(ctx, request) return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) } -func (a *accountGroupsPreviewImpl) internalList(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { +func (a *accountGroupsImpl) internalList(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse ListGroupsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -188,7 +188,7 @@ func (a *accountGroupsPreviewImpl) internalList(ctx context.Context, request Lis return &listGroupsResponse, err } -func (a *accountGroupsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *accountGroupsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -199,7 +199,7 @@ func (a *accountGroupsPreviewImpl) Patch(ctx context.Context, request PartialUpd return err } -func (a *accountGroupsPreviewImpl) Update(ctx context.Context, request Group) error { +func (a *accountGroupsImpl) Update(ctx context.Context, request Group) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -210,12 +210,12 @@ func (a *accountGroupsPreviewImpl) Update(ctx context.Context, request Group) er return err } -// unexported type that holds implementations of just AccountServicePrincipalsPreview API methods -type accountServicePrincipalsPreviewImpl struct { +// unexported type that holds implementations of just AccountServicePrincipals API methods +type accountServicePrincipalsImpl struct { client *client.DatabricksClient } -func (a *accountServicePrincipalsPreviewImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { +func (a *accountServicePrincipalsImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -226,7 +226,7 @@ func (a *accountServicePrincipalsPreviewImpl) Create(ctx context.Context, reques return &servicePrincipal, err } -func (a *accountServicePrincipalsPreviewImpl) Delete(ctx context.Context, request DeleteAccountServicePrincipalRequest) error { +func (a *accountServicePrincipalsImpl) Delete(ctx context.Context, request DeleteAccountServicePrincipalRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -235,7 +235,7 @@ func (a *accountServicePrincipalsPreviewImpl) Delete(ctx context.Context, reques return err } -func (a *accountServicePrincipalsPreviewImpl) Get(ctx context.Context, request GetAccountServicePrincipalRequest) (*ServicePrincipal, error) { +func (a *accountServicePrincipalsImpl) Get(ctx context.Context, request GetAccountServicePrincipalRequest) 
(*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -248,7 +248,7 @@ func (a *accountServicePrincipalsPreviewImpl) Get(ctx context.Context, request G // List service principals. // // Gets the set of service principals associated with a Databricks account. -func (a *accountServicePrincipalsPreviewImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { +func (a *accountServicePrincipalsImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -284,12 +284,12 @@ func (a *accountServicePrincipalsPreviewImpl) List(ctx context.Context, request // List service principals. // // Gets the set of service principals associated with a Databricks account. -func (a *accountServicePrincipalsPreviewImpl) ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) { +func (a *accountServicePrincipalsImpl) ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) { iterator := a.List(ctx, request) return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) } -func (a *accountServicePrincipalsPreviewImpl) internalList(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { +func (a *accountServicePrincipalsImpl) internalList(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -299,7 +299,7 @@ func (a *accountServicePrincipalsPreviewImpl) internalList(ctx context.Context, return &listServicePrincipalResponse, err } -func (a *accountServicePrincipalsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *accountServicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -310,7 +310,7 @@ func (a *accountServicePrincipalsPreviewImpl) Patch(ctx context.Context, request return err } -func (a *accountServicePrincipalsPreviewImpl) Update(ctx context.Context, request ServicePrincipal) error { +func (a *accountServicePrincipalsImpl) Update(ctx context.Context, request ServicePrincipal) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -321,12 +321,12 @@ func (a *accountServicePrincipalsPreviewImpl) Update(ctx context.Context, reques return err } -// unexported type that holds implementations of just AccountUsersPreview API methods -type accountUsersPreviewImpl struct { +// unexported type that holds implementations of just AccountUsers API methods +type accountUsersImpl struct { client *client.DatabricksClient } -func (a *accountUsersPreviewImpl) Create(ctx context.Context, request User) (*User, error) { +func (a *accountUsersImpl) Create(ctx 
context.Context, request User) (*User, error) { var user User path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -337,7 +337,7 @@ func (a *accountUsersPreviewImpl) Create(ctx context.Context, request User) (*Us return &user, err } -func (a *accountUsersPreviewImpl) Delete(ctx context.Context, request DeleteAccountUserRequest) error { +func (a *accountUsersImpl) Delete(ctx context.Context, request DeleteAccountUserRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -346,7 +346,7 @@ func (a *accountUsersPreviewImpl) Delete(ctx context.Context, request DeleteAcco return err } -func (a *accountUsersPreviewImpl) Get(ctx context.Context, request GetAccountUserRequest) (*User, error) { +func (a *accountUsersImpl) Get(ctx context.Context, request GetAccountUserRequest) (*User, error) { var user User path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -359,7 +359,7 @@ func (a *accountUsersPreviewImpl) Get(ctx context.Context, request GetAccountUse // List users. // // Gets details for all the users associated with a Databricks account. -func (a *accountUsersPreviewImpl) List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User] { +func (a *accountUsersImpl) List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -395,12 +395,12 @@ func (a *accountUsersPreviewImpl) List(ctx context.Context, request ListAccountU // List users. // // Gets details for all the users associated with a Databricks account. 
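ListAll threads request.Count into listing.ToSliceN, so Count appears to serve both as the SCIM page size and as an overall cap on the returned slice (the zero-value default is set in the truncated branch of List above). A sketch:

package main

import (
	"context"
	"log"

	iam "github.com/databricks/databricks-sdk-go/iam/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	users, err := iam.NewAccountUsersClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Count caps the result at 50 users as well as sizing the SCIM pages.
	firstFifty, err := users.ListAll(ctx, iam.ListAccountUsersRequest{Count: 50})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("fetched %d users", len(firstFifty))
}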
-func (a *accountUsersPreviewImpl) ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) { +func (a *accountUsersImpl) ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) { iterator := a.List(ctx, request) return listing.ToSliceN[User, int64](ctx, iterator, request.Count) } -func (a *accountUsersPreviewImpl) internalList(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { +func (a *accountUsersImpl) internalList(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -410,7 +410,7 @@ func (a *accountUsersPreviewImpl) internalList(ctx context.Context, request List return &listUsersResponse, err } -func (a *accountUsersPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *accountUsersImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -421,7 +421,7 @@ func (a *accountUsersPreviewImpl) Patch(ctx context.Context, request PartialUpda return err } -func (a *accountUsersPreviewImpl) Update(ctx context.Context, request User) error { +func (a *accountUsersImpl) Update(ctx context.Context, request User) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) @@ -432,12 +432,12 @@ func (a *accountUsersPreviewImpl) Update(ctx context.Context, request User) erro return err } -// unexported type that holds implementations of just CurrentUserPreview API methods -type currentUserPreviewImpl struct { +// unexported type that holds implementations of just CurrentUser API methods +type currentUserImpl struct { client *client.DatabricksClient } -func (a *currentUserPreviewImpl) Me(ctx context.Context) (*User, error) { +func (a *currentUserImpl) Me(ctx context.Context) (*User, error) { var user User path := "/api/2.0preview/preview/scim/v2/Me" @@ -447,12 +447,12 @@ func (a *currentUserPreviewImpl) Me(ctx context.Context) (*User, error) { return &user, err } -// unexported type that holds implementations of just GroupsPreview API methods -type groupsPreviewImpl struct { +// unexported type that holds implementations of just Groups API methods +type groupsImpl struct { client *client.DatabricksClient } -func (a *groupsPreviewImpl) Create(ctx context.Context, request Group) (*Group, error) { +func (a *groupsImpl) Create(ctx context.Context, request Group) (*Group, error) { var group Group path := "/api/2.0preview/preview/scim/v2/Groups" queryParams := make(map[string]any) @@ -463,7 +463,7 @@ func (a *groupsPreviewImpl) Create(ctx context.Context, request Group) (*Group, return &group, err } -func (a *groupsPreviewImpl) Delete(ctx context.Context, request DeleteGroupRequest) error { +func (a *groupsImpl) Delete(ctx context.Context, request DeleteGroupRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) @@ -472,7 +472,7 @@ func (a *groupsPreviewImpl) Delete(ctx context.Context, request DeleteGroupReque return err } -func (a *groupsPreviewImpl) Get(ctx context.Context, request 
GetGroupRequest) (*Group, error) { +func (a *groupsImpl) Get(ctx context.Context, request GetGroupRequest) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) @@ -485,7 +485,7 @@ func (a *groupsPreviewImpl) Get(ctx context.Context, request GetGroupRequest) (* // List group details. // // Gets all details of the groups associated with the Databricks workspace. -func (a *groupsPreviewImpl) List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group] { +func (a *groupsImpl) List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -521,12 +521,12 @@ func (a *groupsPreviewImpl) List(ctx context.Context, request ListGroupsRequest) // List group details. // // Gets all details of the groups associated with the Databricks workspace. -func (a *groupsPreviewImpl) ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) { +func (a *groupsImpl) ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) { iterator := a.List(ctx, request) return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) } -func (a *groupsPreviewImpl) internalList(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { +func (a *groupsImpl) internalList(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse ListGroupsResponse path := "/api/2.0preview/preview/scim/v2/Groups" queryParams := make(map[string]any) @@ -536,7 +536,7 @@ func (a *groupsPreviewImpl) internalList(ctx context.Context, request ListGroups return &listGroupsResponse, err } -func (a *groupsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *groupsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) @@ -547,7 +547,7 @@ func (a *groupsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) er return err } -func (a *groupsPreviewImpl) Update(ctx context.Context, request Group) error { +func (a *groupsImpl) Update(ctx context.Context, request Group) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) @@ -558,12 +558,12 @@ func (a *groupsPreviewImpl) Update(ctx context.Context, request Group) error { return err } -// unexported type that holds implementations of just PermissionMigrationPreview API methods -type permissionMigrationPreviewImpl struct { +// unexported type that holds implementations of just PermissionMigration API methods +type permissionMigrationImpl struct { client *client.DatabricksClient } -func (a *permissionMigrationPreviewImpl) MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error) { +func (a *permissionMigrationImpl) MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error) { var migratePermissionsResponse MigratePermissionsResponse path := "/api/2.0preview/permissionmigration" queryParams := make(map[string]any) @@ -574,12 +574,12 @@ func (a *permissionMigrationPreviewImpl) MigratePermissions(ctx context.Context, return &migratePermissionsResponse, err } -// unexported type that holds implementations of just PermissionsPreview 
API methods -type permissionsPreviewImpl struct { +// unexported type that holds implementations of just Permissions API methods +type permissionsImpl struct { client *client.DatabricksClient } -func (a *permissionsPreviewImpl) Get(ctx context.Context, request GetPermissionRequest) (*ObjectPermissions, error) { +func (a *permissionsImpl) Get(ctx context.Context, request GetPermissionRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) queryParams := make(map[string]any) @@ -589,7 +589,7 @@ func (a *permissionsPreviewImpl) Get(ctx context.Context, request GetPermissionR return &objectPermissions, err } -func (a *permissionsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetPermissionLevelsRequest) (*GetPermissionLevelsResponse, error) { +func (a *permissionsImpl) GetPermissionLevels(ctx context.Context, request GetPermissionLevelsRequest) (*GetPermissionLevelsResponse, error) { var getPermissionLevelsResponse GetPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v/permissionLevels", request.RequestObjectType, request.RequestObjectId) queryParams := make(map[string]any) @@ -599,7 +599,7 @@ func (a *permissionsPreviewImpl) GetPermissionLevels(ctx context.Context, reques return &getPermissionLevelsResponse, err } -func (a *permissionsPreviewImpl) Set(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { +func (a *permissionsImpl) Set(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) queryParams := make(map[string]any) @@ -610,7 +610,7 @@ func (a *permissionsPreviewImpl) Set(ctx context.Context, request PermissionsReq return &objectPermissions, err } -func (a *permissionsPreviewImpl) Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { +func (a *permissionsImpl) Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) queryParams := make(map[string]any) @@ -621,12 +621,12 @@ func (a *permissionsPreviewImpl) Update(ctx context.Context, request Permissions return &objectPermissions, err } -// unexported type that holds implementations of just ServicePrincipalsPreview API methods -type servicePrincipalsPreviewImpl struct { +// unexported type that holds implementations of just ServicePrincipals API methods +type servicePrincipalsImpl struct { client *client.DatabricksClient } -func (a *servicePrincipalsPreviewImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { +func (a *servicePrincipalsImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := "/api/2.0preview/preview/scim/v2/ServicePrincipals" queryParams := make(map[string]any) @@ -637,7 +637,7 @@ func (a *servicePrincipalsPreviewImpl) Create(ctx context.Context, request Servi return &servicePrincipal, err } -func (a *servicePrincipalsPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalRequest) error { +func (a *servicePrincipalsImpl) Delete(ctx context.Context, request DeleteServicePrincipalRequest) error { var deleteResponse 
DeleteResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) @@ -646,7 +646,7 @@ func (a *servicePrincipalsPreviewImpl) Delete(ctx context.Context, request Delet return err } -func (a *servicePrincipalsPreviewImpl) Get(ctx context.Context, request GetServicePrincipalRequest) (*ServicePrincipal, error) { +func (a *servicePrincipalsImpl) Get(ctx context.Context, request GetServicePrincipalRequest) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) @@ -659,7 +659,7 @@ func (a *servicePrincipalsPreviewImpl) Get(ctx context.Context, request GetServi // List service principals. // // Gets the set of service principals associated with a Databricks workspace. -func (a *servicePrincipalsPreviewImpl) List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { +func (a *servicePrincipalsImpl) List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -695,12 +695,12 @@ func (a *servicePrincipalsPreviewImpl) List(ctx context.Context, request ListSer // List service principals. // // Gets the set of service principals associated with a Databricks workspace. -func (a *servicePrincipalsPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) { +func (a *servicePrincipalsImpl) ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) { iterator := a.List(ctx, request) return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) } -func (a *servicePrincipalsPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { +func (a *servicePrincipalsImpl) internalList(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := "/api/2.0preview/preview/scim/v2/ServicePrincipals" queryParams := make(map[string]any) @@ -710,7 +710,7 @@ func (a *servicePrincipalsPreviewImpl) internalList(ctx context.Context, request return &listServicePrincipalResponse, err } -func (a *servicePrincipalsPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *servicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) @@ -721,7 +721,7 @@ func (a *servicePrincipalsPreviewImpl) Patch(ctx context.Context, request Partia return err } -func (a *servicePrincipalsPreviewImpl) Update(ctx context.Context, request ServicePrincipal) error { +func (a *servicePrincipalsImpl) Update(ctx context.Context, request ServicePrincipal) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) @@ -732,12 +732,12 @@ func (a *servicePrincipalsPreviewImpl) Update(ctx context.Context, request Servi return err } -// unexported type that holds implementations of just UsersPreview API methods -type usersPreviewImpl struct { +// unexported type that holds implementations of just Users API methods +type usersImpl 
struct { client *client.DatabricksClient } -func (a *usersPreviewImpl) Create(ctx context.Context, request User) (*User, error) { +func (a *usersImpl) Create(ctx context.Context, request User) (*User, error) { var user User path := "/api/2.0preview/preview/scim/v2/Users" queryParams := make(map[string]any) @@ -748,7 +748,7 @@ func (a *usersPreviewImpl) Create(ctx context.Context, request User) (*User, err return &user, err } -func (a *usersPreviewImpl) Delete(ctx context.Context, request DeleteUserRequest) error { +func (a *usersImpl) Delete(ctx context.Context, request DeleteUserRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) @@ -757,7 +757,7 @@ func (a *usersPreviewImpl) Delete(ctx context.Context, request DeleteUserRequest return err } -func (a *usersPreviewImpl) Get(ctx context.Context, request GetUserRequest) (*User, error) { +func (a *usersImpl) Get(ctx context.Context, request GetUserRequest) (*User, error) { var user User path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) @@ -767,7 +767,7 @@ func (a *usersPreviewImpl) Get(ctx context.Context, request GetUserRequest) (*Us return &user, err } -func (a *usersPreviewImpl) GetPermissionLevels(ctx context.Context) (*GetPasswordPermissionLevelsResponse, error) { +func (a *usersImpl) GetPermissionLevels(ctx context.Context) (*GetPasswordPermissionLevelsResponse, error) { var getPasswordPermissionLevelsResponse GetPasswordPermissionLevelsResponse path := "/api/2.0preview/permissions/authorization/passwords/permissionLevels" @@ -777,7 +777,7 @@ func (a *usersPreviewImpl) GetPermissionLevels(ctx context.Context) (*GetPasswor return &getPasswordPermissionLevelsResponse, err } -func (a *usersPreviewImpl) GetPermissions(ctx context.Context) (*PasswordPermissions, error) { +func (a *usersImpl) GetPermissions(ctx context.Context) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0preview/permissions/authorization/passwords" @@ -790,7 +790,7 @@ func (a *usersPreviewImpl) GetPermissions(ctx context.Context) (*PasswordPermiss // List users. // // Gets details for all the users associated with a Databricks workspace. -func (a *usersPreviewImpl) List(ctx context.Context, request ListUsersRequest) listing.Iterator[User] { +func (a *usersImpl) List(ctx context.Context, request ListUsersRequest) listing.Iterator[User] { request.StartIndex = 1 // SCIM offset starts from 1 if request.Count == 0 { @@ -826,12 +826,12 @@ func (a *usersPreviewImpl) List(ctx context.Context, request ListUsersRequest) l // List users. // // Gets details for all the users associated with a Databricks workspace. 
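Note that ListAll forwards request.Count into listing.ToSliceN, so for these SCIM services Count caps the total number of results returned, not just the page size. A hedged sketch (hypothetical helper; the Filter field is assumed from the SCIM request model and is not visible in this hunk):

func findUsers(ctx context.Context, users interface {
	ListAll(ctx context.Context, request ListUsersRequest) ([]User, error)
}) ([]User, error) {
	return users.ListAll(ctx, ListUsersRequest{
		Filter: `userName co "@example.com"`, // assumed field; illustrative SCIM filter
		Count:  10,                           // caps the returned slice via listing.ToSliceN
	})
}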
-func (a *usersPreviewImpl) ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) { +func (a *usersImpl) ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) { iterator := a.List(ctx, request) return listing.ToSliceN[User, int64](ctx, iterator, request.Count) } -func (a *usersPreviewImpl) internalList(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { +func (a *usersImpl) internalList(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := "/api/2.0preview/preview/scim/v2/Users" queryParams := make(map[string]any) @@ -841,7 +841,7 @@ func (a *usersPreviewImpl) internalList(ctx context.Context, request ListUsersRe return &listUsersResponse, err } -func (a *usersPreviewImpl) Patch(ctx context.Context, request PartialUpdate) error { +func (a *usersImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) @@ -852,7 +852,7 @@ func (a *usersPreviewImpl) Patch(ctx context.Context, request PartialUpdate) err return err } -func (a *usersPreviewImpl) SetPermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { +func (a *usersImpl) SetPermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0preview/permissions/authorization/passwords" queryParams := make(map[string]any) @@ -863,7 +863,7 @@ func (a *usersPreviewImpl) SetPermissions(ctx context.Context, request PasswordP return &passwordPermissions, err } -func (a *usersPreviewImpl) Update(ctx context.Context, request User) error { +func (a *usersImpl) Update(ctx context.Context, request User) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) @@ -874,7 +874,7 @@ func (a *usersPreviewImpl) Update(ctx context.Context, request User) error { return err } -func (a *usersPreviewImpl) UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { +func (a *usersImpl) UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0preview/permissions/authorization/passwords" queryParams := make(map[string]any) @@ -885,12 +885,12 @@ func (a *usersPreviewImpl) UpdatePermissions(ctx context.Context, request Passwo return &passwordPermissions, err } -// unexported type that holds implementations of just WorkspaceAssignmentPreview API methods -type workspaceAssignmentPreviewImpl struct { +// unexported type that holds implementations of just WorkspaceAssignment API methods +type workspaceAssignmentImpl struct { client *client.DatabricksClient } -func (a *workspaceAssignmentPreviewImpl) Delete(ctx context.Context, request DeleteWorkspaceAssignmentRequest) error { +func (a *workspaceAssignmentImpl) Delete(ctx context.Context, request DeleteWorkspaceAssignmentRequest) error { var deleteWorkspacePermissionAssignmentResponse DeleteWorkspacePermissionAssignmentResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) queryParams := make(map[string]any) @@ -900,7 +900,7 @@ func (a 
*workspaceAssignmentPreviewImpl) Delete(ctx context.Context, request Del return err } -func (a *workspaceAssignmentPreviewImpl) Get(ctx context.Context, request GetWorkspaceAssignmentRequest) (*WorkspacePermissions, error) { +func (a *workspaceAssignmentImpl) Get(ctx context.Context, request GetWorkspaceAssignmentRequest) (*WorkspacePermissions, error) { var workspacePermissions WorkspacePermissions path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/permissions", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) @@ -914,7 +914,7 @@ func (a *workspaceAssignmentPreviewImpl) Get(ctx context.Context, request GetWor // // Get the permission assignments for the specified Databricks account and // Databricks workspace. -func (a *workspaceAssignmentPreviewImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] { +func (a *workspaceAssignmentImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] { getNextPage := func(ctx context.Context, req ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -936,11 +936,11 @@ func (a *workspaceAssignmentPreviewImpl) List(ctx context.Context, request ListW // // Get the permission assignments for the specified Databricks account and // Databricks workspace. -func (a *workspaceAssignmentPreviewImpl) ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) { +func (a *workspaceAssignmentImpl) ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) { iterator := a.List(ctx, request) return listing.ToSlice[PermissionAssignment](ctx, iterator) } -func (a *workspaceAssignmentPreviewImpl) internalList(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { +func (a *workspaceAssignmentImpl) internalList(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { var permissionAssignments PermissionAssignments path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) @@ -950,7 +950,7 @@ func (a *workspaceAssignmentPreviewImpl) internalList(ctx context.Context, reque return &permissionAssignments, err } -func (a *workspaceAssignmentPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) { +func (a *workspaceAssignmentImpl) Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) { var permissionAssignment PermissionAssignment path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) queryParams := make(map[string]any) diff --git a/jobs/v2/model.go b/jobs/v2/model.go index 4b155968f..e6f55c621 100755 --- a/jobs/v2/model.go +++ b/jobs/v2/model.go @@ -5245,7 +5245,7 @@ type SubmitTask struct { Libraries []Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"` + NewCluster *ClusterSpec `json:"new_cluster,omitempty"` // The task runs a notebook when the `notebook_task` field is present. 
NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications diff --git a/jobs/v2preview/api.go b/jobs/v2preview/api.go index 1113b9e00..2e517e268 100755 --- a/jobs/v2preview/api.go +++ b/jobs/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Jobs Preview, Policy Compliance For Jobs Preview, etc. +// These APIs allow you to manage Jobs, Policy Compliance For Jobs, etc. package jobspreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type JobsPreviewInterface interface { +type JobsInterface interface { // Cancel all runs of a job. // @@ -159,7 +159,7 @@ type JobsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) - // BaseJobSettingsNameToJobIdMap calls [JobsPreviewAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. + // BaseJobSettingsNameToJobIdMap calls [JobsAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // @@ -168,7 +168,7 @@ type JobsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) - // GetBySettingsName calls [JobsPreviewAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. + // GetBySettingsName calls [JobsAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // @@ -237,9 +237,9 @@ type JobsPreviewInterface interface { UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) } -func NewJobsPreview(client *client.DatabricksClient) *JobsPreviewAPI { - return &JobsPreviewAPI{ - jobsPreviewImpl: jobsPreviewImpl{ +func NewJobs(client *client.DatabricksClient) *JobsAPI { + return &JobsAPI{ + jobsImpl: jobsImpl{ client: client, }, } @@ -263,16 +263,16 @@ func NewJobsPreview(client *client.DatabricksClient) *JobsPreviewAPI { // [Databricks CLI]: https://docs.databricks.com/dev-tools/cli/index.html // [Secrets CLI]: https://docs.databricks.com/dev-tools/cli/secrets-cli.html // [Secrets utility]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets -type JobsPreviewAPI struct { - jobsPreviewImpl +type JobsAPI struct { + jobsImpl } // Cancel a run. // // Cancels a job run or a task run. The run is canceled asynchronously, so it // may still be running when this request completes. -func (a *JobsPreviewAPI) CancelRunByRunId(ctx context.Context, runId int64) error { - return a.jobsPreviewImpl.CancelRun(ctx, CancelRun{ +func (a *JobsAPI) CancelRunByRunId(ctx context.Context, runId int64) error { + return a.jobsImpl.CancelRun(ctx, CancelRun{ RunId: runId, }) } @@ -280,8 +280,8 @@ func (a *JobsPreviewAPI) CancelRunByRunId(ctx context.Context, runId int64) erro // Delete a job. // // Deletes a job. 
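The ...ByJobId and ...ByRunId helpers in this file wrap the generated methods in single-field requests, so callers can act on an ID directly. A minimal sketch (hypothetical helper; api would come from NewJobs):

func stopRun(ctx context.Context, api *JobsAPI, runID int64) error {
	// Cancellation is asynchronous: the run may still be running when this returns.
	return api.CancelRunByRunId(ctx, runID)
}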
-func (a *JobsPreviewAPI) DeleteByJobId(ctx context.Context, jobId int64) error { - return a.jobsPreviewImpl.Delete(ctx, DeleteJob{ +func (a *JobsAPI) DeleteByJobId(ctx context.Context, jobId int64) error { + return a.jobsImpl.Delete(ctx, DeleteJob{ JobId: jobId, }) } @@ -289,8 +289,8 @@ func (a *JobsPreviewAPI) DeleteByJobId(ctx context.Context, jobId int64) error { // Delete a job run. // // Deletes a non-active run. Returns an error if the run is active. -func (a *JobsPreviewAPI) DeleteRunByRunId(ctx context.Context, runId int64) error { - return a.jobsPreviewImpl.DeleteRun(ctx, DeleteRun{ +func (a *JobsAPI) DeleteRunByRunId(ctx context.Context, runId int64) error { + return a.jobsImpl.DeleteRun(ctx, DeleteRun{ RunId: runId, }) } @@ -304,8 +304,8 @@ func (a *JobsPreviewAPI) DeleteRunByRunId(ctx context.Context, runId int64) erro // field to check for more results and pass its value as the `page_token` in // subsequent requests. Arrays with fewer than 100 elements in a page will be // empty on later pages. -func (a *JobsPreviewAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error) { - return a.jobsPreviewImpl.Get(ctx, GetJobRequest{ +func (a *JobsAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error) { + return a.jobsImpl.Get(ctx, GetJobRequest{ JobId: jobId, }) } @@ -313,8 +313,8 @@ func (a *JobsPreviewAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, err // Get job permission levels. // // Gets the permission levels that a user can have on an object. -func (a *JobsPreviewAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error) { - return a.jobsPreviewImpl.GetPermissionLevels(ctx, GetJobPermissionLevelsRequest{ +func (a *JobsAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId string) (*GetJobPermissionLevelsResponse, error) { + return a.jobsImpl.GetPermissionLevels(ctx, GetJobPermissionLevelsRequest{ JobId: jobId, }) } @@ -323,8 +323,8 @@ func (a *JobsPreviewAPI) GetPermissionLevelsByJobId(ctx context.Context, jobId s // Get job permissions. // // Gets the permissions of a job. Jobs can inherit permissions from their root // object. -func (a *JobsPreviewAPI) GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error) { - return a.jobsPreviewImpl.GetPermissions(ctx, GetJobPermissionsRequest{ +func (a *JobsAPI) GetPermissionsByJobId(ctx context.Context, jobId string) (*JobPermissions, error) { + return a.jobsImpl.GetPermissions(ctx, GetJobPermissionsRequest{ JobId: jobId, }) } @@ -341,20 +341,20 @@ func (a *JobsPreviewAPI) GetPermissionsByJobId(ctx context.Context, jobId string // HTTP status code 400 if the __run_id__ parameter is invalid. Runs are // automatically removed after 60 days. If you want to reference them beyond // 60 days, you must save old run results before they expire. -func (a *JobsPreviewAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error) { - return a.jobsPreviewImpl.GetRunOutput(ctx, GetRunOutputRequest{ +func (a *JobsAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOutput, error) { + return a.jobsImpl.GetRunOutput(ctx, GetRunOutputRequest{ RunId: runId, }) } -// BaseJobSettingsNameToJobIdMap calls [JobsPreviewAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. +// BaseJobSettingsNameToJobIdMap calls [JobsAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value.
// // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // // Note: All [BaseJob] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *JobsPreviewAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) { +func (a *JobsAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, request ListJobsRequest) (map[string]int64, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]int64{} result, err := a.ListAll(ctx, request) @@ -372,14 +372,14 @@ func (a *JobsPreviewAPI) BaseJobSettingsNameToJobIdMap(ctx context.Context, requ return mapping, nil } -// GetBySettingsName calls [JobsPreviewAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. +// GetBySettingsName calls [JobsAPI.BaseJobSettingsNameToJobIdMap] and returns a single [BaseJob]. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. // // Note: All [BaseJob] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *JobsPreviewAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, error) { +func (a *JobsAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListJobsRequest{}) if err != nil { @@ -400,7 +400,7 @@ func (a *JobsPreviewAPI) GetBySettingsName(ctx context.Context, name string) (*B return &alternatives[0], nil } -type PolicyComplianceForJobsPreviewInterface interface { +type PolicyComplianceForJobsInterface interface { // Enforce job policy compliance. // @@ -447,9 +447,9 @@ type PolicyComplianceForJobsPreviewInterface interface { ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) } -func NewPolicyComplianceForJobsPreview(client *client.DatabricksClient) *PolicyComplianceForJobsPreviewAPI { - return &PolicyComplianceForJobsPreviewAPI{ - policyComplianceForJobsPreviewImpl: policyComplianceForJobsPreviewImpl{ +func NewPolicyComplianceForJobs(client *client.DatabricksClient) *PolicyComplianceForJobsAPI { + return &PolicyComplianceForJobsAPI{ + policyComplianceForJobsImpl: policyComplianceForJobsImpl{ client: client, }, } @@ -468,8 +468,8 @@ func NewPolicyComplianceForJobsPreview(client *client.DatabricksClient) *PolicyC // The get and list compliance APIs allow you to view the policy compliance // status of a job. The enforce compliance API allows you to update a job so // that it becomes compliant with all of its policies. -type PolicyComplianceForJobsPreviewAPI struct { - policyComplianceForJobsPreviewImpl +type PolicyComplianceForJobsAPI struct { + policyComplianceForJobsImpl } // Get job policy compliance. @@ -478,8 +478,8 @@ type PolicyComplianceForJobsPreviewAPI struct { // compliance if a cluster policy they use was updated after the job was last // edited and some of its job clusters no longer comply with their updated // policies. 
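GetBySettingsName is the lookup counterpart of BaseJobSettingsNameToJobIdMap: it lists every job into memory and then requires the name to be unique. Sketch (hypothetical helper and job name):

func findNightlyJob(ctx context.Context, api *JobsAPI) (*BaseJob, error) {
	// Errors if zero jobs, or more than one job, carry this settings name.
	return api.GetBySettingsName(ctx, "nightly-etl")
}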
-func (a *PolicyComplianceForJobsPreviewAPI) GetComplianceByJobId(ctx context.Context, jobId int64) (*GetPolicyComplianceResponse, error) { - return a.policyComplianceForJobsPreviewImpl.GetCompliance(ctx, GetPolicyComplianceRequest{ +func (a *PolicyComplianceForJobsAPI) GetComplianceByJobId(ctx context.Context, jobId int64) (*GetPolicyComplianceResponse, error) { + return a.policyComplianceForJobsImpl.GetCompliance(ctx, GetPolicyComplianceRequest{ JobId: jobId, }) } diff --git a/jobs/v2preview/client.go b/jobs/v2preview/client.go index 888215a5a..9648cd788 100755 --- a/jobs/v2preview/client.go +++ b/jobs/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type JobsPreviewClient struct { - JobsPreviewInterface +type JobsClient struct { + JobsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewJobsPreviewClient(cfg *config.Config) (*JobsPreviewClient, error) { +func NewJobsClient(cfg *config.Config) (*JobsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewJobsPreviewClient(cfg *config.Config) (*JobsPreviewClient, error) { return nil, err } - return &JobsPreviewClient{ - Config: cfg, - apiClient: apiClient, - JobsPreviewInterface: NewJobsPreview(databricksClient), + return &JobsClient{ + Config: cfg, + apiClient: apiClient, + JobsInterface: NewJobs(databricksClient), }, nil } -type PolicyComplianceForJobsPreviewClient struct { - PolicyComplianceForJobsPreviewInterface +type PolicyComplianceForJobsClient struct { + PolicyComplianceForJobsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPolicyComplianceForJobsPreviewClient(cfg *config.Config) (*PolicyComplianceForJobsPreviewClient, error) { +func NewPolicyComplianceForJobsClient(cfg *config.Config) (*PolicyComplianceForJobsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,9 +71,9 @@ func NewPolicyComplianceForJobsPreviewClient(cfg *config.Config) (*PolicyComplia return nil, err } - return &PolicyComplianceForJobsPreviewClient{ - Config: cfg, - apiClient: apiClient, - PolicyComplianceForJobsPreviewInterface: NewPolicyComplianceForJobsPreview(databricksClient), + return &PolicyComplianceForJobsClient{ + Config: cfg, + apiClient: apiClient, + PolicyComplianceForJobsInterface: NewPolicyComplianceForJobs(databricksClient), }, nil } diff --git a/jobs/v2preview/impl.go b/jobs/v2preview/impl.go index cd7558d33..ba02415f1 100755 --- a/jobs/v2preview/impl.go +++ b/jobs/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just JobsPreview API methods -type jobsPreviewImpl struct { +// unexported type that holds implementations of just Jobs API methods +type jobsImpl struct { client *client.DatabricksClient } -func (a *jobsPreviewImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) error { +func (a *jobsImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) error { var cancelAllRunsResponse CancelAllRunsResponse path := "/api/2.2preview/jobs/runs/cancel-all" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *jobsPreviewImpl) CancelAllRuns(ctx context.Context, request CancelAllRu return err } -func (a *jobsPreviewImpl) CancelRun(ctx context.Context, request CancelRun) error { +func (a *jobsImpl) CancelRun(ctx context.Context, request CancelRun) error { var cancelRunResponse CancelRunResponse path := "/api/2.2preview/jobs/runs/cancel" queryParams := 
make(map[string]any) @@ -39,7 +39,7 @@ func (a *jobsPreviewImpl) CancelRun(ctx context.Context, request CancelRun) erro return err } -func (a *jobsPreviewImpl) Create(ctx context.Context, request CreateJob) (*CreateResponse, error) { +func (a *jobsImpl) Create(ctx context.Context, request CreateJob) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.2preview/jobs/create" queryParams := make(map[string]any) @@ -50,7 +50,7 @@ func (a *jobsPreviewImpl) Create(ctx context.Context, request CreateJob) (*Creat return &createResponse, err } -func (a *jobsPreviewImpl) Delete(ctx context.Context, request DeleteJob) error { +func (a *jobsImpl) Delete(ctx context.Context, request DeleteJob) error { var deleteResponse DeleteResponse path := "/api/2.2preview/jobs/delete" queryParams := make(map[string]any) @@ -61,7 +61,7 @@ func (a *jobsPreviewImpl) Delete(ctx context.Context, request DeleteJob) error { return err } -func (a *jobsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRun) error { +func (a *jobsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { var deleteRunResponse DeleteRunResponse path := "/api/2.2preview/jobs/runs/delete" queryParams := make(map[string]any) @@ -72,7 +72,7 @@ func (a *jobsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRun) erro return err } -func (a *jobsPreviewImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) { +func (a *jobsImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) { var exportRunOutput ExportRunOutput path := "/api/2.2preview/jobs/runs/export" queryParams := make(map[string]any) @@ -82,7 +82,7 @@ func (a *jobsPreviewImpl) ExportRun(ctx context.Context, request ExportRunReques return &exportRunOutput, err } -func (a *jobsPreviewImpl) Get(ctx context.Context, request GetJobRequest) (*Job, error) { +func (a *jobsImpl) Get(ctx context.Context, request GetJobRequest) (*Job, error) { var job Job path := "/api/2.2preview/jobs/get" queryParams := make(map[string]any) @@ -92,7 +92,7 @@ func (a *jobsPreviewImpl) Get(ctx context.Context, request GetJobRequest) (*Job, return &job, err } -func (a *jobsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) { +func (a *jobsImpl) GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) { var getJobPermissionLevelsResponse GetJobPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v/permissionLevels", request.JobId) queryParams := make(map[string]any) @@ -102,7 +102,7 @@ func (a *jobsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetJo return &getJobPermissionLevelsResponse, err } -func (a *jobsPreviewImpl) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) { +func (a *jobsImpl) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) queryParams := make(map[string]any) @@ -112,7 +112,7 @@ func (a *jobsPreviewImpl) GetPermissions(ctx context.Context, request GetJobPerm return &jobPermissions, err } -func (a *jobsPreviewImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { +func (a *jobsImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { var run Run path := 
"/api/2.2preview/jobs/runs/get" queryParams := make(map[string]any) @@ -122,7 +122,7 @@ func (a *jobsPreviewImpl) GetRun(ctx context.Context, request GetRunRequest) (*R return &run, err } -func (a *jobsPreviewImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) { +func (a *jobsImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) { var runOutput RunOutput path := "/api/2.2preview/jobs/runs/get-output" queryParams := make(map[string]any) @@ -135,7 +135,7 @@ func (a *jobsPreviewImpl) GetRunOutput(ctx context.Context, request GetRunOutput // List jobs. // // Retrieves a list of jobs. -func (a *jobsPreviewImpl) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { +func (a *jobsImpl) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { getNextPage := func(ctx context.Context, req ListJobsRequest) (*ListJobsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -162,11 +162,11 @@ func (a *jobsPreviewImpl) List(ctx context.Context, request ListJobsRequest) lis // List jobs. // // Retrieves a list of jobs. -func (a *jobsPreviewImpl) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) { +func (a *jobsImpl) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) { iterator := a.List(ctx, request) return listing.ToSlice[BaseJob](ctx, iterator) } -func (a *jobsPreviewImpl) internalList(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { +func (a *jobsImpl) internalList(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { var listJobsResponse ListJobsResponse path := "/api/2.2preview/jobs/list" queryParams := make(map[string]any) @@ -179,7 +179,7 @@ func (a *jobsPreviewImpl) internalList(ctx context.Context, request ListJobsRequ // List job runs. // // List runs in descending order by start time. -func (a *jobsPreviewImpl) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { +func (a *jobsImpl) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { getNextPage := func(ctx context.Context, req ListRunsRequest) (*ListRunsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -206,11 +206,11 @@ func (a *jobsPreviewImpl) ListRuns(ctx context.Context, request ListRunsRequest) // List job runs. // // List runs in descending order by start time. 
-func (a *jobsPreviewImpl) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) { +func (a *jobsImpl) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) { iterator := a.ListRuns(ctx, request) return listing.ToSlice[BaseRun](ctx, iterator) } -func (a *jobsPreviewImpl) internalListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { +func (a *jobsImpl) internalListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { var listRunsResponse ListRunsResponse path := "/api/2.2preview/jobs/runs/list" queryParams := make(map[string]any) @@ -220,7 +220,7 @@ func (a *jobsPreviewImpl) internalListRuns(ctx context.Context, request ListRuns return &listRunsResponse, err } -func (a *jobsPreviewImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) { +func (a *jobsImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) { var repairRunResponse RepairRunResponse path := "/api/2.2preview/jobs/runs/repair" queryParams := make(map[string]any) @@ -231,7 +231,7 @@ func (a *jobsPreviewImpl) RepairRun(ctx context.Context, request RepairRun) (*Re return &repairRunResponse, err } -func (a *jobsPreviewImpl) Reset(ctx context.Context, request ResetJob) error { +func (a *jobsImpl) Reset(ctx context.Context, request ResetJob) error { var resetResponse ResetResponse path := "/api/2.2preview/jobs/reset" queryParams := make(map[string]any) @@ -242,7 +242,7 @@ func (a *jobsPreviewImpl) Reset(ctx context.Context, request ResetJob) error { return err } -func (a *jobsPreviewImpl) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) { +func (a *jobsImpl) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) { var runNowResponse RunNowResponse path := "/api/2.2preview/jobs/run-now" queryParams := make(map[string]any) @@ -253,7 +253,7 @@ func (a *jobsPreviewImpl) RunNow(ctx context.Context, request RunNow) (*RunNowRe return &runNowResponse, err } -func (a *jobsPreviewImpl) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { +func (a *jobsImpl) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) queryParams := make(map[string]any) @@ -264,7 +264,7 @@ func (a *jobsPreviewImpl) SetPermissions(ctx context.Context, request JobPermiss return &jobPermissions, err } -func (a *jobsPreviewImpl) Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) { +func (a *jobsImpl) Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) { var submitRunResponse SubmitRunResponse path := "/api/2.2preview/jobs/runs/submit" queryParams := make(map[string]any) @@ -275,7 +275,7 @@ func (a *jobsPreviewImpl) Submit(ctx context.Context, request SubmitRun) (*Submi return &submitRunResponse, err } -func (a *jobsPreviewImpl) Update(ctx context.Context, request UpdateJob) error { +func (a *jobsImpl) Update(ctx context.Context, request UpdateJob) error { var updateResponse UpdateResponse path := "/api/2.2preview/jobs/update" queryParams := make(map[string]any) @@ -286,7 +286,7 @@ func (a *jobsPreviewImpl) Update(ctx context.Context, request UpdateJob) error { return err } -func (a *jobsPreviewImpl) UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { +func (a *jobsImpl) UpdatePermissions(ctx context.Context, 
request JobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0preview/permissions/jobs/%v", request.JobId) queryParams := make(map[string]any) @@ -297,12 +297,12 @@ func (a *jobsPreviewImpl) UpdatePermissions(ctx context.Context, request JobPerm return &jobPermissions, err } -// unexported type that holds implementations of just PolicyComplianceForJobsPreview API methods -type policyComplianceForJobsPreviewImpl struct { +// unexported type that holds implementations of just PolicyComplianceForJobs API methods +type policyComplianceForJobsImpl struct { client *client.DatabricksClient } -func (a *policyComplianceForJobsPreviewImpl) EnforceCompliance(ctx context.Context, request EnforcePolicyComplianceRequest) (*EnforcePolicyComplianceResponse, error) { +func (a *policyComplianceForJobsImpl) EnforceCompliance(ctx context.Context, request EnforcePolicyComplianceRequest) (*EnforcePolicyComplianceResponse, error) { var enforcePolicyComplianceResponse EnforcePolicyComplianceResponse path := "/api/2.0preview/policies/jobs/enforce-compliance" queryParams := make(map[string]any) @@ -313,7 +313,7 @@ func (a *policyComplianceForJobsPreviewImpl) EnforceCompliance(ctx context.Conte return &enforcePolicyComplianceResponse, err } -func (a *policyComplianceForJobsPreviewImpl) GetCompliance(ctx context.Context, request GetPolicyComplianceRequest) (*GetPolicyComplianceResponse, error) { +func (a *policyComplianceForJobsImpl) GetCompliance(ctx context.Context, request GetPolicyComplianceRequest) (*GetPolicyComplianceResponse, error) { var getPolicyComplianceResponse GetPolicyComplianceResponse path := "/api/2.0preview/policies/jobs/get-compliance" queryParams := make(map[string]any) @@ -329,7 +329,7 @@ func (a *policyComplianceForJobsPreviewImpl) GetCompliance(ctx context.Context, // Jobs could be out of compliance if a cluster policy they use was updated // after the job was last edited and its job clusters no longer comply with the // updated policy. -func (a *policyComplianceForJobsPreviewImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] { +func (a *policyComplianceForJobsImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] { getNextPage := func(ctx context.Context, req ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -359,11 +359,11 @@ func (a *policyComplianceForJobsPreviewImpl) ListCompliance(ctx context.Context, // Jobs could be out of compliance if a cluster policy they use was updated // after the job was last edited and its job clusters no longer comply with the // updated policy. 
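EnforceCompliance is the write half of these compliance APIs. A hedged sketch of a dry run (hypothetical helper; the JobId and ValidateOnly request fields are assumed, since the request model is not spelled out in this hunk):

func previewEnforcement(ctx context.Context, api *PolicyComplianceForJobsAPI, jobID int64) (*EnforcePolicyComplianceResponse, error) {
	return api.EnforceCompliance(ctx, EnforcePolicyComplianceRequest{
		JobId:        jobID, // assumed field
		ValidateOnly: true,  // assumed field: report what would change without applying it
	})
}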
-func (a *policyComplianceForJobsPreviewImpl) ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) { +func (a *policyComplianceForJobsImpl) ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) { iterator := a.ListCompliance(ctx, request) return listing.ToSlice[JobCompliance](ctx, iterator) } -func (a *policyComplianceForJobsPreviewImpl) internalListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { +func (a *policyComplianceForJobsImpl) internalListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { var listJobComplianceForPolicyResponse ListJobComplianceForPolicyResponse path := "/api/2.0preview/policies/jobs/list-compliance" queryParams := make(map[string]any) diff --git a/jobs/v2preview/model.go b/jobs/v2preview/model.go index 8edd5ef1f..40e8466d7 100755 --- a/jobs/v2preview/model.go +++ b/jobs/v2preview/model.go @@ -5245,7 +5245,7 @@ type SubmitTask struct { Libraries []Library `json:"libraries,omitempty"` // If new_cluster, a description of a new cluster that is created for each // run. - NewCluster *JobsClusterSpec `json:"new_cluster,omitempty"` + NewCluster *ClusterSpec `json:"new_cluster,omitempty"` // The task runs a notebook when the `notebook_task` field is present. NotebookTask *NotebookTask `json:"notebook_task,omitempty"` // Optional notification settings that are used when sending notifications diff --git a/marketplace/v2preview/api.go b/marketplace/v2preview/api.go index 63ba34419..01c2f699e 100755 --- a/marketplace/v2preview/api.go +++ b/marketplace/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Consumer Fulfillments Preview, Consumer Installations Preview, Consumer Listings Preview, Consumer Personalization Requests Preview, Consumer Providers Preview, Provider Exchange Filters Preview, Provider Exchanges Preview, Provider Files Preview, Provider Listings Preview, Provider Personalization Requests Preview, Provider Provider Analytics Dashboards Preview, Provider Providers Preview, etc. +// These APIs allow you to manage Consumer Fulfillments, Consumer Installations, Consumer Listings, Consumer Personalization Requests, Consumer Providers, Provider Exchange Filters, Provider Exchanges, Provider Files, Provider Listings, Provider Personalization Requests, Provider Provider Analytics Dashboards, Provider Providers, etc. package marketplacepreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type ConsumerFulfillmentsPreviewInterface interface { +type ConsumerFulfillmentsInterface interface { // Get listing content metadata. // @@ -65,24 +65,24 @@ type ConsumerFulfillmentsPreviewInterface interface { ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) } -func NewConsumerFulfillmentsPreview(client *client.DatabricksClient) *ConsumerFulfillmentsPreviewAPI { - return &ConsumerFulfillmentsPreviewAPI{ - consumerFulfillmentsPreviewImpl: consumerFulfillmentsPreviewImpl{ +func NewConsumerFulfillments(client *client.DatabricksClient) *ConsumerFulfillmentsAPI { + return &ConsumerFulfillmentsAPI{ + consumerFulfillmentsImpl: consumerFulfillmentsImpl{ client: client, }, } } // Fulfillments are entities that allow consumers to preview installations. 
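The NewCluster retype from *JobsClusterSpec to *ClusterSpec lands identically in jobs/v2/model.go and jobs/v2preview/model.go, so SubmitTask literals compile against the shared cluster type in both packages. A sketch under that assumption (ClusterSpec and NotebookTask field names are taken from the usual jobs models and are not shown in this diff):

func exampleSubmitTask() SubmitTask {
	return SubmitTask{
		TaskKey: "main", // assumed field
		NewCluster: &ClusterSpec{ // per-run cluster, as the field comment describes
			SparkVersion: "15.4.x-scala2.12", // assumed fields; illustrative values
			NodeTypeId:   "i3.xlarge",
			NumWorkers:   1,
		},
		NotebookTask: &NotebookTask{NotebookPath: "/Workspace/example"}, // assumed field
	}
}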
-type ConsumerFulfillmentsPreviewAPI struct { - consumerFulfillmentsPreviewImpl +type ConsumerFulfillmentsAPI struct { + consumerFulfillmentsImpl } // Get listing content metadata. // // Get a high level preview of the metadata of listing installable content. -func (a *ConsumerFulfillmentsPreviewAPI) GetByListingId(ctx context.Context, listingId string) (*GetListingContentMetadataResponse, error) { - return a.consumerFulfillmentsPreviewImpl.internalGet(ctx, GetListingContentMetadataRequest{ +func (a *ConsumerFulfillmentsAPI) GetByListingId(ctx context.Context, listingId string) (*GetListingContentMetadataResponse, error) { + return a.consumerFulfillmentsImpl.internalGet(ctx, GetListingContentMetadataRequest{ ListingId: listingId, }) } @@ -94,13 +94,13 @@ func (a *ConsumerFulfillmentsPreviewAPI) GetByListingId(ctx context.Context, lis // attached share or git repo. Only one of these fields will be present. // Personalized installations contain metadata about the attached share or git // repo, as well as the Delta Sharing recipient type. -func (a *ConsumerFulfillmentsPreviewAPI) ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) { - return a.consumerFulfillmentsPreviewImpl.internalList(ctx, ListFulfillmentsRequest{ +func (a *ConsumerFulfillmentsAPI) ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) { + return a.consumerFulfillmentsImpl.internalList(ctx, ListFulfillmentsRequest{ ListingId: listingId, }) } -type ConsumerInstallationsPreviewInterface interface { +type ConsumerInstallationsInterface interface { // Install from a listing. // @@ -160,9 +160,9 @@ type ConsumerInstallationsPreviewInterface interface { Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) } -func NewConsumerInstallationsPreview(client *client.DatabricksClient) *ConsumerInstallationsPreviewAPI { - return &ConsumerInstallationsPreviewAPI{ - consumerInstallationsPreviewImpl: consumerInstallationsPreviewImpl{ +func NewConsumerInstallations(client *client.DatabricksClient) *ConsumerInstallationsAPI { + return &ConsumerInstallationsAPI{ + consumerInstallationsImpl: consumerInstallationsImpl{ client: client, }, } @@ -170,15 +170,15 @@ func NewConsumerInstallationsPreview(client *client.DatabricksClient) *ConsumerI // Installations are entities that allow consumers to interact with Databricks // Marketplace listings. -type ConsumerInstallationsPreviewAPI struct { - consumerInstallationsPreviewImpl +type ConsumerInstallationsAPI struct { + consumerInstallationsImpl } // Uninstall from a listing. // // Uninstall an installation associated with a Databricks Marketplace listing. -func (a *ConsumerInstallationsPreviewAPI) DeleteByListingIdAndInstallationId(ctx context.Context, listingId string, installationId string) error { - return a.consumerInstallationsPreviewImpl.Delete(ctx, DeleteInstallationRequest{ +func (a *ConsumerInstallationsAPI) DeleteByListingIdAndInstallationId(ctx context.Context, listingId string, installationId string) error { + return a.consumerInstallationsImpl.Delete(ctx, DeleteInstallationRequest{ ListingId: listingId, InstallationId: installationId, }) @@ -187,13 +187,13 @@ func (a *ConsumerInstallationsPreviewAPI) DeleteByListingIdAndInstallationId(ctx // List installations for a listing. // // List all installations for a particular listing. 
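Uninstalling takes both identifiers because installations are scoped to their listing. Sketch (hypothetical helper and IDs):

func uninstall(ctx context.Context, api *ConsumerInstallationsAPI, listingID, installationID string) error {
	return api.DeleteByListingIdAndInstallationId(ctx, listingID, installationID)
}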
-func (a *ConsumerInstallationsPreviewAPI) ListListingInstallationsByListingId(ctx context.Context, listingId string) (*ListInstallationsResponse, error) { - return a.consumerInstallationsPreviewImpl.internalListListingInstallations(ctx, ListInstallationsRequest{ +func (a *ConsumerInstallationsAPI) ListListingInstallationsByListingId(ctx context.Context, listingId string) (*ListInstallationsResponse, error) { + return a.consumerInstallationsImpl.internalListListingInstallations(ctx, ListInstallationsRequest{ ListingId: listingId, }) } -type ConsumerListingsPreviewInterface interface { +type ConsumerListingsInterface interface { // Get one batch of listings. One may specify up to 50 IDs per request. // @@ -229,7 +229,7 @@ type ConsumerListingsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) - // ListingSummaryNameToIdMap calls [ConsumerListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. + // ListingSummaryNameToIdMap calls [ConsumerListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // @@ -238,7 +238,7 @@ type ConsumerListingsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListingSummaryNameToIdMap(ctx context.Context, request ListListingsRequest) (map[string]string, error) - // GetBySummaryName calls [ConsumerListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. + // GetBySummaryName calls [ConsumerListingsAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // @@ -266,9 +266,9 @@ type ConsumerListingsPreviewInterface interface { SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) } -func NewConsumerListingsPreview(client *client.DatabricksClient) *ConsumerListingsPreviewAPI { - return &ConsumerListingsPreviewAPI{ - consumerListingsPreviewImpl: consumerListingsPreviewImpl{ +func NewConsumerListings(client *client.DatabricksClient) *ConsumerListingsAPI { + return &ConsumerListingsAPI{ + consumerListingsImpl: consumerListingsImpl{ client: client, }, } @@ -276,28 +276,28 @@ func NewConsumerListingsPreview(client *client.DatabricksClient) *ConsumerListin // Listings are the core entities in the Marketplace. They represent the // products that are available for consumption. -type ConsumerListingsPreviewAPI struct { - consumerListingsPreviewImpl +type ConsumerListingsAPI struct { + consumerListingsImpl } // Get listing. // // Get a published listing in the Databricks Marketplace that the consumer has // access to. -func (a *ConsumerListingsPreviewAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { - return a.consumerListingsPreviewImpl.Get(ctx, GetListingRequest{ +func (a *ConsumerListingsAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { + return a.consumerListingsImpl.Get(ctx, GetListingRequest{ Id: id, }) } -// ListingSummaryNameToIdMap calls [ConsumerListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. +// ListingSummaryNameToIdMap calls [ConsumerListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. 
// // Returns an error if there's more than one [Listing] with the same .Summary.Name. // // Note: All [Listing] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Context, request ListListingsRequest) (map[string]string, error) { +func (a *ConsumerListingsAPI) ListingSummaryNameToIdMap(ctx context.Context, request ListListingsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -315,14 +315,14 @@ func (a *ConsumerListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Conte return mapping, nil } -// GetBySummaryName calls [ConsumerListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. +// GetBySummaryName calls [ConsumerListingsAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // // Note: All [Listing] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsPreviewAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { +func (a *ConsumerListingsAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListListingsRequest{}) if err != nil { @@ -343,7 +343,7 @@ func (a *ConsumerListingsPreviewAPI) GetBySummaryName(ctx context.Context, name return &alternatives[0], nil } -type ConsumerPersonalizationRequestsPreviewInterface interface { +type ConsumerPersonalizationRequestsInterface interface { // Create a personalization request. // @@ -377,9 +377,9 @@ type ConsumerPersonalizationRequestsPreviewInterface interface { ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) } -func NewConsumerPersonalizationRequestsPreview(client *client.DatabricksClient) *ConsumerPersonalizationRequestsPreviewAPI { - return &ConsumerPersonalizationRequestsPreviewAPI{ - consumerPersonalizationRequestsPreviewImpl: consumerPersonalizationRequestsPreviewImpl{ +func NewConsumerPersonalizationRequests(client *client.DatabricksClient) *ConsumerPersonalizationRequestsAPI { + return &ConsumerPersonalizationRequestsAPI{ + consumerPersonalizationRequestsImpl: consumerPersonalizationRequestsImpl{ client: client, }, } @@ -387,21 +387,21 @@ func NewConsumerPersonalizationRequestsPreview(client *client.DatabricksClient) // Personalization Requests allow customers to interact with the individualized // Marketplace listing flow. -type ConsumerPersonalizationRequestsPreviewAPI struct { - consumerPersonalizationRequestsPreviewImpl +type ConsumerPersonalizationRequestsAPI struct { + consumerPersonalizationRequestsImpl } // Get the personalization request for a listing. // // Get the personalization request for a listing. Each consumer can make at // *most* one personalization request for a listing. 
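The name-based helpers trade convenience for memory. A minimal sketch of resolving a listing ID by summary name, assuming in-package access; the listing name is a placeholder:

func exampleResolveListing(ctx context.Context, a *ConsumerListingsAPI) (string, error) {
	// GetBySummaryName loads every listing via ListAll before matching and
	// errors if two listings share the same .Summary.Name.
	l, err := a.GetBySummaryName(ctx, "Example Listing") // hypothetical name
	if err != nil {
		return "", err
	}
	return l.Id, nil
}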
-func (a *ConsumerPersonalizationRequestsPreviewAPI) GetByListingId(ctx context.Context, listingId string) (*GetPersonalizationRequestResponse, error) { - return a.consumerPersonalizationRequestsPreviewImpl.Get(ctx, GetPersonalizationRequestRequest{ +func (a *ConsumerPersonalizationRequestsAPI) GetByListingId(ctx context.Context, listingId string) (*GetPersonalizationRequestResponse, error) { + return a.consumerPersonalizationRequestsImpl.Get(ctx, GetPersonalizationRequestRequest{ ListingId: listingId, }) } -type ConsumerProvidersPreviewInterface interface { +type ConsumerProvidersInterface interface { // Get one batch of providers. One may specify up to 50 IDs per request. // @@ -437,7 +437,7 @@ type ConsumerProvidersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) - // ProviderInfoNameToIdMap calls [ConsumerProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. + // ProviderInfoNameToIdMap calls [ConsumerProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // @@ -446,7 +446,7 @@ type ConsumerProvidersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) - // GetByName calls [ConsumerProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. + // GetByName calls [ConsumerProvidersAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // @@ -456,37 +456,37 @@ type ConsumerProvidersPreviewInterface interface { GetByName(ctx context.Context, name string) (*ProviderInfo, error) } -func NewConsumerProvidersPreview(client *client.DatabricksClient) *ConsumerProvidersPreviewAPI { - return &ConsumerProvidersPreviewAPI{ - consumerProvidersPreviewImpl: consumerProvidersPreviewImpl{ +func NewConsumerProviders(client *client.DatabricksClient) *ConsumerProvidersAPI { + return &ConsumerProvidersAPI{ + consumerProvidersImpl: consumerProvidersImpl{ client: client, }, } } // Providers are the entities that publish listings to the Marketplace. -type ConsumerProvidersPreviewAPI struct { - consumerProvidersPreviewImpl +type ConsumerProvidersAPI struct { + consumerProvidersImpl } // Get a provider. // // Get a provider in the Databricks Marketplace with at least one visible // listing. -func (a *ConsumerProvidersPreviewAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { - return a.consumerProvidersPreviewImpl.Get(ctx, GetProviderRequest{ +func (a *ConsumerProvidersAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { + return a.consumerProvidersImpl.Get(ctx, GetProviderRequest{ Id: id, }) } -// ProviderInfoNameToIdMap calls [ConsumerProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. +// ProviderInfoNameToIdMap calls [ConsumerProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // // Note: All [ProviderInfo] instances are loaded into memory before creating a map. 
// // This method is generated by Databricks SDK Code Generator. -func (a *ConsumerProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { +func (a *ConsumerProvidersAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -504,14 +504,14 @@ func (a *ConsumerProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Contex return mapping, nil } -// GetByName calls [ConsumerProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. +// GetByName calls [ConsumerProvidersAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // // Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ConsumerProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { +func (a *ConsumerProvidersAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListProvidersRequest{}) if err != nil { @@ -532,7 +532,7 @@ func (a *ConsumerProvidersPreviewAPI) GetByName(ctx context.Context, name string return &alternatives[0], nil } -type ProviderExchangeFiltersPreviewInterface interface { +type ProviderExchangeFiltersInterface interface { // Create a new exchange filter. // @@ -563,7 +563,7 @@ type ProviderExchangeFiltersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) - // ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersPreviewAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. + // ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. // // Returns an error if there's more than one [ExchangeFilter] with the same .Name. // @@ -572,7 +572,7 @@ type ProviderExchangeFiltersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ExchangeFilterNameToIdMap(ctx context.Context, request ListExchangeFiltersRequest) (map[string]string, error) - // GetByName calls [ProviderExchangeFiltersPreviewAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. + // GetByName calls [ProviderExchangeFiltersAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. // // Returns an error if there's more than one [ExchangeFilter] with the same .Name. 
// @@ -587,36 +587,36 @@ type ProviderExchangeFiltersPreviewInterface interface { Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) } -func NewProviderExchangeFiltersPreview(client *client.DatabricksClient) *ProviderExchangeFiltersPreviewAPI { - return &ProviderExchangeFiltersPreviewAPI{ - providerExchangeFiltersPreviewImpl: providerExchangeFiltersPreviewImpl{ +func NewProviderExchangeFilters(client *client.DatabricksClient) *ProviderExchangeFiltersAPI { + return &ProviderExchangeFiltersAPI{ + providerExchangeFiltersImpl: providerExchangeFiltersImpl{ client: client, }, } } // Marketplace exchanges filters curate which groups can access an exchange. -type ProviderExchangeFiltersPreviewAPI struct { - providerExchangeFiltersPreviewImpl +type ProviderExchangeFiltersAPI struct { + providerExchangeFiltersImpl } // Delete an exchange filter. // // Delete an exchange filter -func (a *ProviderExchangeFiltersPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.providerExchangeFiltersPreviewImpl.Delete(ctx, DeleteExchangeFilterRequest{ +func (a *ProviderExchangeFiltersAPI) DeleteById(ctx context.Context, id string) error { + return a.providerExchangeFiltersImpl.Delete(ctx, DeleteExchangeFilterRequest{ Id: id, }) } -// ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersPreviewAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. +// ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. // // Returns an error if there's more than one [ExchangeFilter] with the same .Name. // // Note: All [ExchangeFilter] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangeFiltersPreviewAPI) ExchangeFilterNameToIdMap(ctx context.Context, request ListExchangeFiltersRequest) (map[string]string, error) { +func (a *ProviderExchangeFiltersAPI) ExchangeFilterNameToIdMap(ctx context.Context, request ListExchangeFiltersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -634,14 +634,14 @@ func (a *ProviderExchangeFiltersPreviewAPI) ExchangeFilterNameToIdMap(ctx contex return mapping, nil } -// GetByName calls [ProviderExchangeFiltersPreviewAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. +// GetByName calls [ProviderExchangeFiltersAPI.ExchangeFilterNameToIdMap] and returns a single [ExchangeFilter]. // // Returns an error if there's more than one [ExchangeFilter] with the same .Name. // // Note: All [ExchangeFilter] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
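A minimal sketch of the map-building variant, assuming in-package access; the filter name is a placeholder. Like the generated GetByName below, it starts from an empty ListExchangeFiltersRequest:

func exampleDeleteFilterByName(ctx context.Context, a *ProviderExchangeFiltersAPI) error {
	// Builds a .Name -> .Id map from every exchange filter; all results are
	// held in memory, so this suits small result sets.
	byName, err := a.ExchangeFilterNameToIdMap(ctx, ListExchangeFiltersRequest{})
	if err != nil {
		return err
	}
	if id, ok := byName["my-filter"]; ok { // hypothetical filter name
		return a.DeleteById(ctx, id)
	}
	return nil
}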
-func (a *ProviderExchangeFiltersPreviewAPI) GetByName(ctx context.Context, name string) (*ExchangeFilter, error) { +func (a *ProviderExchangeFiltersAPI) GetByName(ctx context.Context, name string) (*ExchangeFilter, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListExchangeFiltersRequest{}) if err != nil { @@ -662,7 +662,7 @@ func (a *ProviderExchangeFiltersPreviewAPI) GetByName(ctx context.Context, name return &alternatives[0], nil } -type ProviderExchangesPreviewInterface interface { +type ProviderExchangesInterface interface { // Add an exchange for listing. // @@ -718,7 +718,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) - // ExchangeNameToIdMap calls [ProviderExchangesPreviewAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. + // ExchangeNameToIdMap calls [ProviderExchangesAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. // // Returns an error if there's more than one [Exchange] with the same .Name. // @@ -727,7 +727,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ExchangeNameToIdMap(ctx context.Context, request ListExchangesRequest) (map[string]string, error) - // GetByName calls [ProviderExchangesPreviewAPI.ExchangeNameToIdMap] and returns a single [Exchange]. + // GetByName calls [ProviderExchangesAPI.ExchangeNameToIdMap] and returns a single [Exchange]. // // Returns an error if there's more than one [Exchange] with the same .Name. // @@ -750,7 +750,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) - // ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesPreviewAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. + // ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. // @@ -759,7 +759,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ExchangeListingExchangeNameToExchangeIdMap(ctx context.Context, request ListExchangesForListingRequest) (map[string]string, error) - // GetByExchangeName calls [ProviderExchangesPreviewAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. + // GetByExchangeName calls [ProviderExchangesAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. // // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. // @@ -782,7 +782,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. 
ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) - // ExchangeListingListingNameToListingIdMap calls [ProviderExchangesPreviewAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. + // ExchangeListingListingNameToListingIdMap calls [ProviderExchangesAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. // @@ -791,7 +791,7 @@ type ProviderExchangesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ExchangeListingListingNameToListingIdMap(ctx context.Context, request ListListingsForExchangeRequest) (map[string]string, error) - // GetByListingName calls [ProviderExchangesPreviewAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. + // GetByListingName calls [ProviderExchangesAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. // // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. // @@ -806,9 +806,9 @@ type ProviderExchangesPreviewInterface interface { Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) } -func NewProviderExchangesPreview(client *client.DatabricksClient) *ProviderExchangesPreviewAPI { - return &ProviderExchangesPreviewAPI{ - providerExchangesPreviewImpl: providerExchangesPreviewImpl{ +func NewProviderExchanges(client *client.DatabricksClient) *ProviderExchangesAPI { + return &ProviderExchangesAPI{ + providerExchangesImpl: providerExchangesImpl{ client: client, }, } @@ -816,15 +816,15 @@ func NewProviderExchangesPreview(client *client.DatabricksClient) *ProviderExcha // Marketplace exchanges allow providers to share their listings with a curated // set of customers. -type ProviderExchangesPreviewAPI struct { - providerExchangesPreviewImpl +type ProviderExchangesAPI struct { + providerExchangesImpl } // Delete an exchange. // // This removes a listing from marketplace. -func (a *ProviderExchangesPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.providerExchangesPreviewImpl.Delete(ctx, DeleteExchangeRequest{ +func (a *ProviderExchangesAPI) DeleteById(ctx context.Context, id string) error { + return a.providerExchangesImpl.Delete(ctx, DeleteExchangeRequest{ Id: id, }) } @@ -832,8 +832,8 @@ func (a *ProviderExchangesPreviewAPI) DeleteById(ctx context.Context, id string) // Remove an exchange for listing. // // Disassociate an exchange with a listing -func (a *ProviderExchangesPreviewAPI) DeleteListingFromExchangeById(ctx context.Context, id string) error { - return a.providerExchangesPreviewImpl.DeleteListingFromExchange(ctx, RemoveExchangeForListingRequest{ +func (a *ProviderExchangesAPI) DeleteListingFromExchangeById(ctx context.Context, id string) error { + return a.providerExchangesImpl.DeleteListingFromExchange(ctx, RemoveExchangeForListingRequest{ Id: id, }) } @@ -841,20 +841,20 @@ func (a *ProviderExchangesPreviewAPI) DeleteListingFromExchangeById(ctx context. // Get an exchange. // // Get an exchange. 
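A minimal cleanup sketch combining the two delete methods above, assuming in-package access; both IDs are placeholders:

func exampleCleanupExchange(ctx context.Context, a *ProviderExchangesAPI, exchangeListingId, exchangeId string) error {
	// First detach the listing from the exchange, then remove the exchange.
	if err := a.DeleteListingFromExchangeById(ctx, exchangeListingId); err != nil {
		return err
	}
	return a.DeleteById(ctx, exchangeId)
}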
-func (a *ProviderExchangesPreviewAPI) GetById(ctx context.Context, id string) (*GetExchangeResponse, error) { - return a.providerExchangesPreviewImpl.Get(ctx, GetExchangeRequest{ +func (a *ProviderExchangesAPI) GetById(ctx context.Context, id string) (*GetExchangeResponse, error) { + return a.providerExchangesImpl.Get(ctx, GetExchangeRequest{ Id: id, }) } -// ExchangeNameToIdMap calls [ProviderExchangesPreviewAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. +// ExchangeNameToIdMap calls [ProviderExchangesAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. // // Returns an error if there's more than one [Exchange] with the same .Name. // // Note: All [Exchange] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesPreviewAPI) ExchangeNameToIdMap(ctx context.Context, request ListExchangesRequest) (map[string]string, error) { +func (a *ProviderExchangesAPI) ExchangeNameToIdMap(ctx context.Context, request ListExchangesRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -872,14 +872,14 @@ func (a *ProviderExchangesPreviewAPI) ExchangeNameToIdMap(ctx context.Context, r return mapping, nil } -// GetByName calls [ProviderExchangesPreviewAPI.ExchangeNameToIdMap] and returns a single [Exchange]. +// GetByName calls [ProviderExchangesAPI.ExchangeNameToIdMap] and returns a single [Exchange]. // // Returns an error if there's more than one [Exchange] with the same .Name. // // Note: All [Exchange] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesPreviewAPI) GetByName(ctx context.Context, name string) (*Exchange, error) { +func (a *ProviderExchangesAPI) GetByName(ctx context.Context, name string) (*Exchange, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListExchangesRequest{}) if err != nil { @@ -900,14 +900,14 @@ func (a *ProviderExchangesPreviewAPI) GetByName(ctx context.Context, name string return &alternatives[0], nil } -// ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesPreviewAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. +// ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. // // Note: All [ExchangeListing] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
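A minimal sketch of the name lookup just shown, assuming in-package access; the exchange name is a placeholder:

func exampleFindExchange(ctx context.Context, a *ProviderExchangesAPI) (*Exchange, error) {
	// Lists every exchange into memory and errors on duplicate names.
	return a.GetByName(ctx, "my-exchange") // hypothetical name
}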
-func (a *ProviderExchangesPreviewAPI) ExchangeListingExchangeNameToExchangeIdMap(ctx context.Context, request ListExchangesForListingRequest) (map[string]string, error) { +func (a *ProviderExchangesAPI) ExchangeListingExchangeNameToExchangeIdMap(ctx context.Context, request ListExchangesForListingRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListExchangesForListingAll(ctx, request) @@ -925,14 +925,14 @@ func (a *ProviderExchangesPreviewAPI) ExchangeListingExchangeNameToExchangeIdMap return mapping, nil } -// GetByExchangeName calls [ProviderExchangesPreviewAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. +// GetByExchangeName calls [ProviderExchangesAPI.ExchangeListingExchangeNameToExchangeIdMap] and returns a single [ExchangeListing]. // // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. // // Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesPreviewAPI) GetByExchangeName(ctx context.Context, name string) (*ExchangeListing, error) { +func (a *ProviderExchangesAPI) GetByExchangeName(ctx context.Context, name string) (*ExchangeListing, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListExchangesForListingAll(ctx, ListExchangesForListingRequest{}) if err != nil { @@ -953,14 +953,14 @@ func (a *ProviderExchangesPreviewAPI) GetByExchangeName(ctx context.Context, nam return &alternatives[0], nil } -// ExchangeListingListingNameToListingIdMap calls [ProviderExchangesPreviewAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. +// ExchangeListingListingNameToListingIdMap calls [ProviderExchangesAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. // // Note: All [ExchangeListing] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesPreviewAPI) ExchangeListingListingNameToListingIdMap(ctx context.Context, request ListListingsForExchangeRequest) (map[string]string, error) { +func (a *ProviderExchangesAPI) ExchangeListingListingNameToListingIdMap(ctx context.Context, request ListListingsForExchangeRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListListingsForExchangeAll(ctx, request) @@ -978,14 +978,14 @@ func (a *ProviderExchangesPreviewAPI) ExchangeListingListingNameToListingIdMap(c return mapping, nil } -// GetByListingName calls [ProviderExchangesPreviewAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. +// GetByListingName calls [ProviderExchangesAPI.ExchangeListingListingNameToListingIdMap] and returns a single [ExchangeListing]. // // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. // // Note: All [ExchangeListing] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
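The association helpers resolve in both directions. A minimal sketch of the exchange-name direction, assuming in-package access, with a placeholder name:

func exampleAssociation(ctx context.Context, a *ProviderExchangesAPI) (string, error) {
	// Scans ListExchangesForListingAll results in memory; GetByListingName is
	// the mirror image over ListListingsForExchangeAll.
	el, err := a.GetByExchangeName(ctx, "my-exchange") // hypothetical name
	if err != nil {
		return "", err
	}
	return el.ExchangeId, nil
}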
-func (a *ProviderExchangesPreviewAPI) GetByListingName(ctx context.Context, name string) (*ExchangeListing, error) { +func (a *ProviderExchangesAPI) GetByListingName(ctx context.Context, name string) (*ExchangeListing, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListListingsForExchangeAll(ctx, ListListingsForExchangeRequest{}) if err != nil { @@ -1006,7 +1006,7 @@ func (a *ProviderExchangesPreviewAPI) GetByListingName(ctx context.Context, name return &alternatives[0], nil } -type ProviderFilesPreviewInterface interface { +type ProviderFilesInterface interface { // Create a file. // @@ -1048,7 +1048,7 @@ type ProviderFilesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) - // FileInfoDisplayNameToIdMap calls [ProviderFilesPreviewAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. + // FileInfoDisplayNameToIdMap calls [ProviderFilesAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. // // Returns an error if there's more than one [FileInfo] with the same .DisplayName. // @@ -1057,7 +1057,7 @@ type ProviderFilesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. FileInfoDisplayNameToIdMap(ctx context.Context, request ListFilesRequest) (map[string]string, error) - // GetByDisplayName calls [ProviderFilesPreviewAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. + // GetByDisplayName calls [ProviderFilesAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. // // Returns an error if there's more than one [FileInfo] with the same .DisplayName. // @@ -1067,9 +1067,9 @@ type ProviderFilesPreviewInterface interface { GetByDisplayName(ctx context.Context, name string) (*FileInfo, error) } -func NewProviderFilesPreview(client *client.DatabricksClient) *ProviderFilesPreviewAPI { - return &ProviderFilesPreviewAPI{ - providerFilesPreviewImpl: providerFilesPreviewImpl{ +func NewProviderFiles(client *client.DatabricksClient) *ProviderFilesAPI { + return &ProviderFilesAPI{ + providerFilesImpl: providerFilesImpl{ client: client, }, } @@ -1077,15 +1077,15 @@ func NewProviderFilesPreview(client *client.DatabricksClient) *ProviderFilesPrev // Marketplace offers a set of file APIs for various purposes such as preview // notebooks and provider icons. -type ProviderFilesPreviewAPI struct { - providerFilesPreviewImpl +type ProviderFilesAPI struct { + providerFilesImpl } // Delete a file. // // Delete a file -func (a *ProviderFilesPreviewAPI) DeleteByFileId(ctx context.Context, fileId string) error { - return a.providerFilesPreviewImpl.Delete(ctx, DeleteFileRequest{ +func (a *ProviderFilesAPI) DeleteByFileId(ctx context.Context, fileId string) error { + return a.providerFilesImpl.Delete(ctx, DeleteFileRequest{ FileId: fileId, }) } @@ -1093,20 +1093,20 @@ func (a *ProviderFilesPreviewAPI) DeleteByFileId(ctx context.Context, fileId str // Get a file. 
// // Get a file -func (a *ProviderFilesPreviewAPI) GetByFileId(ctx context.Context, fileId string) (*GetFileResponse, error) { - return a.providerFilesPreviewImpl.Get(ctx, GetFileRequest{ +func (a *ProviderFilesAPI) GetByFileId(ctx context.Context, fileId string) (*GetFileResponse, error) { + return a.providerFilesImpl.Get(ctx, GetFileRequest{ FileId: fileId, }) } -// FileInfoDisplayNameToIdMap calls [ProviderFilesPreviewAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. +// FileInfoDisplayNameToIdMap calls [ProviderFilesAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. // // Returns an error if there's more than one [FileInfo] with the same .DisplayName. // // Note: All [FileInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderFilesPreviewAPI) FileInfoDisplayNameToIdMap(ctx context.Context, request ListFilesRequest) (map[string]string, error) { +func (a *ProviderFilesAPI) FileInfoDisplayNameToIdMap(ctx context.Context, request ListFilesRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1124,14 +1124,14 @@ func (a *ProviderFilesPreviewAPI) FileInfoDisplayNameToIdMap(ctx context.Context return mapping, nil } -// GetByDisplayName calls [ProviderFilesPreviewAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. +// GetByDisplayName calls [ProviderFilesAPI.FileInfoDisplayNameToIdMap] and returns a single [FileInfo]. // // Returns an error if there's more than one [FileInfo] with the same .DisplayName. // // Note: All [FileInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderFilesPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*FileInfo, error) { +func (a *ProviderFilesAPI) GetByDisplayName(ctx context.Context, name string) (*FileInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListFilesRequest{}) if err != nil { @@ -1152,7 +1152,7 @@ func (a *ProviderFilesPreviewAPI) GetByDisplayName(ctx context.Context, name str return &alternatives[0], nil } -type ProviderListingsPreviewInterface interface { +type ProviderListingsInterface interface { // Create a listing. // @@ -1193,7 +1193,7 @@ type ProviderListingsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) - // ListingSummaryNameToIdMap calls [ProviderListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. + // ListingSummaryNameToIdMap calls [ProviderListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // @@ -1202,7 +1202,7 @@ type ProviderListingsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListingSummaryNameToIdMap(ctx context.Context, request GetListingsRequest) (map[string]string, error) - // GetBySummaryName calls [ProviderListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. 
+ // GetBySummaryName calls [ProviderListingsAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // @@ -1217,9 +1217,9 @@ type ProviderListingsPreviewInterface interface { Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) } -func NewProviderListingsPreview(client *client.DatabricksClient) *ProviderListingsPreviewAPI { - return &ProviderListingsPreviewAPI{ - providerListingsPreviewImpl: providerListingsPreviewImpl{ +func NewProviderListings(client *client.DatabricksClient) *ProviderListingsAPI { + return &ProviderListingsAPI{ + providerListingsImpl: providerListingsImpl{ client: client, }, } @@ -1227,15 +1227,15 @@ func NewProviderListingsPreview(client *client.DatabricksClient) *ProviderListin // Listings are the core entities in the Marketplace. They represent the // products that are available for consumption. -type ProviderListingsPreviewAPI struct { - providerListingsPreviewImpl +type ProviderListingsAPI struct { + providerListingsImpl } // Delete a listing. // // Delete a listing -func (a *ProviderListingsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.providerListingsPreviewImpl.Delete(ctx, DeleteListingRequest{ +func (a *ProviderListingsAPI) DeleteById(ctx context.Context, id string) error { + return a.providerListingsImpl.Delete(ctx, DeleteListingRequest{ Id: id, }) } @@ -1243,20 +1243,20 @@ func (a *ProviderListingsPreviewAPI) DeleteById(ctx context.Context, id string) // Get a listing. // // Get a listing -func (a *ProviderListingsPreviewAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { - return a.providerListingsPreviewImpl.Get(ctx, GetListingRequest{ +func (a *ProviderListingsAPI) GetById(ctx context.Context, id string) (*GetListingResponse, error) { + return a.providerListingsImpl.Get(ctx, GetListingRequest{ Id: id, }) } -// ListingSummaryNameToIdMap calls [ProviderListingsPreviewAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. +// ListingSummaryNameToIdMap calls [ProviderListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // // Note: All [Listing] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Context, request GetListingsRequest) (map[string]string, error) { +func (a *ProviderListingsAPI) ListingSummaryNameToIdMap(ctx context.Context, request GetListingsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1274,14 +1274,14 @@ func (a *ProviderListingsPreviewAPI) ListingSummaryNameToIdMap(ctx context.Conte return mapping, nil } -// GetBySummaryName calls [ProviderListingsPreviewAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. +// GetBySummaryName calls [ProviderListingsAPI.ListingSummaryNameToIdMap] and returns a single [Listing]. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. // // Note: All [Listing] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
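A minimal get-then-delete sketch on the renamed provider listings API, assuming in-package access; the ID is a placeholder:

func exampleRetireListing(ctx context.Context, a *ProviderListingsAPI, id string) error {
	// Confirm the listing exists before removing it.
	if _, err := a.GetById(ctx, id); err != nil {
		return err
	}
	return a.DeleteById(ctx, id)
}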
-func (a *ProviderListingsPreviewAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { +func (a *ProviderListingsAPI) GetBySummaryName(ctx context.Context, name string) (*Listing, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, GetListingsRequest{}) if err != nil { @@ -1302,7 +1302,7 @@ func (a *ProviderListingsPreviewAPI) GetBySummaryName(ctx context.Context, name return &alternatives[0], nil } -type ProviderPersonalizationRequestsPreviewInterface interface { +type ProviderPersonalizationRequestsInterface interface { // All personalization requests across all listings. // @@ -1327,9 +1327,9 @@ type ProviderPersonalizationRequestsPreviewInterface interface { Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) } -func NewProviderPersonalizationRequestsPreview(client *client.DatabricksClient) *ProviderPersonalizationRequestsPreviewAPI { - return &ProviderPersonalizationRequestsPreviewAPI{ - providerPersonalizationRequestsPreviewImpl: providerPersonalizationRequestsPreviewImpl{ +func NewProviderPersonalizationRequests(client *client.DatabricksClient) *ProviderPersonalizationRequestsAPI { + return &ProviderPersonalizationRequestsAPI{ + providerPersonalizationRequestsImpl: providerPersonalizationRequestsImpl{ client: client, }, } @@ -1337,11 +1337,11 @@ func NewProviderPersonalizationRequestsPreview(client *client.DatabricksClient) // Personalization requests are an alternate to instantly available listings. // Control the lifecycle of personalized solutions. -type ProviderPersonalizationRequestsPreviewAPI struct { - providerPersonalizationRequestsPreviewImpl +type ProviderPersonalizationRequestsAPI struct { + providerPersonalizationRequestsImpl } -type ProviderProviderAnalyticsDashboardsPreviewInterface interface { +type ProviderProviderAnalyticsDashboardsInterface interface { // Create provider analytics dashboard. // @@ -1365,20 +1365,20 @@ type ProviderProviderAnalyticsDashboardsPreviewInterface interface { Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) } -func NewProviderProviderAnalyticsDashboardsPreview(client *client.DatabricksClient) *ProviderProviderAnalyticsDashboardsPreviewAPI { - return &ProviderProviderAnalyticsDashboardsPreviewAPI{ - providerProviderAnalyticsDashboardsPreviewImpl: providerProviderAnalyticsDashboardsPreviewImpl{ +func NewProviderProviderAnalyticsDashboards(client *client.DatabricksClient) *ProviderProviderAnalyticsDashboardsAPI { + return &ProviderProviderAnalyticsDashboardsAPI{ + providerProviderAnalyticsDashboardsImpl: providerProviderAnalyticsDashboardsImpl{ client: client, }, } } // Manage templated analytics solution for providers. -type ProviderProviderAnalyticsDashboardsPreviewAPI struct { - providerProviderAnalyticsDashboardsPreviewImpl +type ProviderProviderAnalyticsDashboardsAPI struct { + providerProviderAnalyticsDashboardsImpl } -type ProviderProvidersPreviewInterface interface { +type ProviderProvidersInterface interface { // Create a provider. // @@ -1419,7 +1419,7 @@ type ProviderProvidersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) - // ProviderInfoNameToIdMap calls [ProviderProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. 
+ // ProviderInfoNameToIdMap calls [ProviderProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // @@ -1428,7 +1428,7 @@ type ProviderProvidersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) - // GetByName calls [ProviderProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. + // GetByName calls [ProviderProvidersAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // @@ -1443,24 +1443,24 @@ type ProviderProvidersPreviewInterface interface { Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) } -func NewProviderProvidersPreview(client *client.DatabricksClient) *ProviderProvidersPreviewAPI { - return &ProviderProvidersPreviewAPI{ - providerProvidersPreviewImpl: providerProvidersPreviewImpl{ +func NewProviderProviders(client *client.DatabricksClient) *ProviderProvidersAPI { + return &ProviderProvidersAPI{ + providerProvidersImpl: providerProvidersImpl{ client: client, }, } } // Providers are entities that manage assets in Marketplace. -type ProviderProvidersPreviewAPI struct { - providerProvidersPreviewImpl +type ProviderProvidersAPI struct { + providerProvidersImpl } // Delete provider. // // Delete provider -func (a *ProviderProvidersPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.providerProvidersPreviewImpl.Delete(ctx, DeleteProviderRequest{ +func (a *ProviderProvidersAPI) DeleteById(ctx context.Context, id string) error { + return a.providerProvidersImpl.Delete(ctx, DeleteProviderRequest{ Id: id, }) } @@ -1468,20 +1468,20 @@ func (a *ProviderProvidersPreviewAPI) DeleteById(ctx context.Context, id string) // Get provider. // // Get provider profile -func (a *ProviderProvidersPreviewAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { - return a.providerProvidersPreviewImpl.Get(ctx, GetProviderRequest{ +func (a *ProviderProvidersAPI) GetById(ctx context.Context, id string) (*GetProviderResponse, error) { + return a.providerProvidersImpl.Get(ctx, GetProviderRequest{ Id: id, }) } -// ProviderInfoNameToIdMap calls [ProviderProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. +// ProviderInfoNameToIdMap calls [ProviderProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // // Note: All [ProviderInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
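Since every renamed constructor takes the same *client.DatabricksClient, the API groups can share one underlying client. A minimal wiring sketch, assuming in-package access:

func exampleWire(dc *client.DatabricksClient) (*ProviderProvidersAPI, *ProviderPersonalizationRequestsAPI) {
	// One DatabricksClient backs both API groups.
	return NewProviderProviders(dc), NewProviderPersonalizationRequests(dc)
}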
-func (a *ProviderProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { +func (a *ProviderProvidersAPI) ProviderInfoNameToIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1499,14 +1499,14 @@ func (a *ProviderProvidersPreviewAPI) ProviderInfoNameToIdMap(ctx context.Contex return mapping, nil } -// GetByName calls [ProviderProvidersPreviewAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. +// GetByName calls [ProviderProvidersAPI.ProviderInfoNameToIdMap] and returns a single [ProviderInfo]. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // // Note: All [ProviderInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ProviderProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { +func (a *ProviderProvidersAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListProvidersRequest{}) if err != nil { diff --git a/marketplace/v2preview/client.go b/marketplace/v2preview/client.go index 4c7590fdb..a0785438e 100755 --- a/marketplace/v2preview/client.go +++ b/marketplace/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type ConsumerFulfillmentsPreviewClient struct { - ConsumerFulfillmentsPreviewInterface +type ConsumerFulfillmentsClient struct { + ConsumerFulfillmentsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConsumerFulfillmentsPreviewClient(cfg *config.Config) (*ConsumerFulfillmentsPreviewClient, error) { +func NewConsumerFulfillmentsClient(cfg *config.Config) (*ConsumerFulfillmentsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewConsumerFulfillmentsPreviewClient(cfg *config.Config) (*ConsumerFulfillm return nil, err } - return &ConsumerFulfillmentsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConsumerFulfillmentsPreviewInterface: NewConsumerFulfillmentsPreview(databricksClient), + return &ConsumerFulfillmentsClient{ + Config: cfg, + apiClient: apiClient, + ConsumerFulfillmentsInterface: NewConsumerFulfillments(databricksClient), }, nil } -type ConsumerInstallationsPreviewClient struct { - ConsumerInstallationsPreviewInterface +type ConsumerInstallationsClient struct { + ConsumerInstallationsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConsumerInstallationsPreviewClient(cfg *config.Config) (*ConsumerInstallationsPreviewClient, error) { +func NewConsumerInstallationsClient(cfg *config.Config) (*ConsumerInstallationsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewConsumerInstallationsPreviewClient(cfg *config.Config) (*ConsumerInstall return nil, err } - return &ConsumerInstallationsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConsumerInstallationsPreviewInterface: NewConsumerInstallationsPreview(databricksClient), + return &ConsumerInstallationsClient{ + Config: cfg, + apiClient: apiClient, + ConsumerInstallationsInterface: NewConsumerInstallations(databricksClient), }, nil } -type ConsumerListingsPreviewClient struct { - ConsumerListingsPreviewInterface 
+type ConsumerListingsClient struct { + ConsumerListingsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConsumerListingsPreviewClient(cfg *config.Config) (*ConsumerListingsPreviewClient, error) { +func NewConsumerListingsClient(cfg *config.Config) (*ConsumerListingsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewConsumerListingsPreviewClient(cfg *config.Config) (*ConsumerListingsPrev return nil, err } - return &ConsumerListingsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConsumerListingsPreviewInterface: NewConsumerListingsPreview(databricksClient), + return &ConsumerListingsClient{ + Config: cfg, + apiClient: apiClient, + ConsumerListingsInterface: NewConsumerListings(databricksClient), }, nil } -type ConsumerPersonalizationRequestsPreviewClient struct { - ConsumerPersonalizationRequestsPreviewInterface +type ConsumerPersonalizationRequestsClient struct { + ConsumerPersonalizationRequestsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConsumerPersonalizationRequestsPreviewClient(cfg *config.Config) (*ConsumerPersonalizationRequestsPreviewClient, error) { +func NewConsumerPersonalizationRequestsClient(cfg *config.Config) (*ConsumerPersonalizationRequestsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,20 +139,20 @@ func NewConsumerPersonalizationRequestsPreviewClient(cfg *config.Config) (*Consu return nil, err } - return &ConsumerPersonalizationRequestsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConsumerPersonalizationRequestsPreviewInterface: NewConsumerPersonalizationRequestsPreview(databricksClient), + return &ConsumerPersonalizationRequestsClient{ + Config: cfg, + apiClient: apiClient, + ConsumerPersonalizationRequestsInterface: NewConsumerPersonalizationRequests(databricksClient), }, nil } -type ConsumerProvidersPreviewClient struct { - ConsumerProvidersPreviewInterface +type ConsumerProvidersClient struct { + ConsumerProvidersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewConsumerProvidersPreviewClient(cfg *config.Config) (*ConsumerProvidersPreviewClient, error) { +func NewConsumerProvidersClient(cfg *config.Config) (*ConsumerProvidersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -173,20 +173,20 @@ func NewConsumerProvidersPreviewClient(cfg *config.Config) (*ConsumerProvidersPr return nil, err } - return &ConsumerProvidersPreviewClient{ - Config: cfg, - apiClient: apiClient, - ConsumerProvidersPreviewInterface: NewConsumerProvidersPreview(databricksClient), + return &ConsumerProvidersClient{ + Config: cfg, + apiClient: apiClient, + ConsumerProvidersInterface: NewConsumerProviders(databricksClient), }, nil } -type ProviderExchangeFiltersPreviewClient struct { - ProviderExchangeFiltersPreviewInterface +type ProviderExchangeFiltersClient struct { + ProviderExchangeFiltersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderExchangeFiltersPreviewClient(cfg *config.Config) (*ProviderExchangeFiltersPreviewClient, error) { +func NewProviderExchangeFiltersClient(cfg *config.Config) (*ProviderExchangeFiltersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -207,20 +207,20 @@ func NewProviderExchangeFiltersPreviewClient(cfg *config.Config) (*ProviderExcha return nil, err } - return &ProviderExchangeFiltersPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderExchangeFiltersPreviewInterface: NewProviderExchangeFiltersPreview(databricksClient), + return 
&ProviderExchangeFiltersClient{ + Config: cfg, + apiClient: apiClient, + ProviderExchangeFiltersInterface: NewProviderExchangeFilters(databricksClient), }, nil } -type ProviderExchangesPreviewClient struct { - ProviderExchangesPreviewInterface +type ProviderExchangesClient struct { + ProviderExchangesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderExchangesPreviewClient(cfg *config.Config) (*ProviderExchangesPreviewClient, error) { +func NewProviderExchangesClient(cfg *config.Config) (*ProviderExchangesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -241,20 +241,20 @@ func NewProviderExchangesPreviewClient(cfg *config.Config) (*ProviderExchangesPr return nil, err } - return &ProviderExchangesPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderExchangesPreviewInterface: NewProviderExchangesPreview(databricksClient), + return &ProviderExchangesClient{ + Config: cfg, + apiClient: apiClient, + ProviderExchangesInterface: NewProviderExchanges(databricksClient), }, nil } -type ProviderFilesPreviewClient struct { - ProviderFilesPreviewInterface +type ProviderFilesClient struct { + ProviderFilesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderFilesPreviewClient(cfg *config.Config) (*ProviderFilesPreviewClient, error) { +func NewProviderFilesClient(cfg *config.Config) (*ProviderFilesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -275,20 +275,20 @@ func NewProviderFilesPreviewClient(cfg *config.Config) (*ProviderFilesPreviewCli return nil, err } - return &ProviderFilesPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderFilesPreviewInterface: NewProviderFilesPreview(databricksClient), + return &ProviderFilesClient{ + Config: cfg, + apiClient: apiClient, + ProviderFilesInterface: NewProviderFiles(databricksClient), }, nil } -type ProviderListingsPreviewClient struct { - ProviderListingsPreviewInterface +type ProviderListingsClient struct { + ProviderListingsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderListingsPreviewClient(cfg *config.Config) (*ProviderListingsPreviewClient, error) { +func NewProviderListingsClient(cfg *config.Config) (*ProviderListingsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -309,20 +309,20 @@ func NewProviderListingsPreviewClient(cfg *config.Config) (*ProviderListingsPrev return nil, err } - return &ProviderListingsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderListingsPreviewInterface: NewProviderListingsPreview(databricksClient), + return &ProviderListingsClient{ + Config: cfg, + apiClient: apiClient, + ProviderListingsInterface: NewProviderListings(databricksClient), }, nil } -type ProviderPersonalizationRequestsPreviewClient struct { - ProviderPersonalizationRequestsPreviewInterface +type ProviderPersonalizationRequestsClient struct { + ProviderPersonalizationRequestsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderPersonalizationRequestsPreviewClient(cfg *config.Config) (*ProviderPersonalizationRequestsPreviewClient, error) { +func NewProviderPersonalizationRequestsClient(cfg *config.Config) (*ProviderPersonalizationRequestsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -343,20 +343,20 @@ func NewProviderPersonalizationRequestsPreviewClient(cfg *config.Config) (*Provi return nil, err } - return &ProviderPersonalizationRequestsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderPersonalizationRequestsPreviewInterface: 
NewProviderPersonalizationRequestsPreview(databricksClient), + return &ProviderPersonalizationRequestsClient{ + Config: cfg, + apiClient: apiClient, + ProviderPersonalizationRequestsInterface: NewProviderPersonalizationRequests(databricksClient), }, nil } -type ProviderProviderAnalyticsDashboardsPreviewClient struct { - ProviderProviderAnalyticsDashboardsPreviewInterface +type ProviderProviderAnalyticsDashboardsClient struct { + ProviderProviderAnalyticsDashboardsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderProviderAnalyticsDashboardsPreviewClient(cfg *config.Config) (*ProviderProviderAnalyticsDashboardsPreviewClient, error) { +func NewProviderProviderAnalyticsDashboardsClient(cfg *config.Config) (*ProviderProviderAnalyticsDashboardsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -377,20 +377,20 @@ func NewProviderProviderAnalyticsDashboardsPreviewClient(cfg *config.Config) (*P return nil, err } - return &ProviderProviderAnalyticsDashboardsPreviewClient{ + return &ProviderProviderAnalyticsDashboardsClient{ Config: cfg, apiClient: apiClient, - ProviderProviderAnalyticsDashboardsPreviewInterface: NewProviderProviderAnalyticsDashboardsPreview(databricksClient), + ProviderProviderAnalyticsDashboardsInterface: NewProviderProviderAnalyticsDashboards(databricksClient), }, nil } -type ProviderProvidersPreviewClient struct { - ProviderProvidersPreviewInterface +type ProviderProvidersClient struct { + ProviderProvidersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProviderProvidersPreviewClient(cfg *config.Config) (*ProviderProvidersPreviewClient, error) { +func NewProviderProvidersClient(cfg *config.Config) (*ProviderProvidersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -411,9 +411,9 @@ func NewProviderProvidersPreviewClient(cfg *config.Config) (*ProviderProvidersPr return nil, err } - return &ProviderProvidersPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProviderProvidersPreviewInterface: NewProviderProvidersPreview(databricksClient), + return &ProviderProvidersClient{ + Config: cfg, + apiClient: apiClient, + ProviderProvidersInterface: NewProviderProviders(databricksClient), }, nil } diff --git a/marketplace/v2preview/impl.go b/marketplace/v2preview/impl.go index 150e929c3..875647a2b 100755 --- a/marketplace/v2preview/impl.go +++ b/marketplace/v2preview/impl.go @@ -12,15 +12,15 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just ConsumerFulfillmentsPreview API methods -type consumerFulfillmentsPreviewImpl struct { +// unexported type that holds implementations of just ConsumerFulfillments API methods +type consumerFulfillmentsImpl struct { client *client.DatabricksClient } // Get listing content metadata. // // Get a high level preview of the metadata of listing installable content. -func (a *consumerFulfillmentsPreviewImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] { +func (a *consumerFulfillmentsImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] { getNextPage := func(ctx context.Context, req GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -47,11 +47,11 @@ func (a *consumerFulfillmentsPreviewImpl) Get(ctx context.Context, request GetLi // Get listing content metadata. 
// // Get a high level preview of the metadata of listing installable content. -func (a *consumerFulfillmentsPreviewImpl) GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) { +func (a *consumerFulfillmentsImpl) GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) { iterator := a.Get(ctx, request) return listing.ToSlice[SharedDataObject](ctx, iterator) } -func (a *consumerFulfillmentsPreviewImpl) internalGet(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { +func (a *consumerFulfillmentsImpl) internalGet(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { var getListingContentMetadataResponse GetListingContentMetadataResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/content", request.ListingId) queryParams := make(map[string]any) @@ -68,7 +68,7 @@ func (a *consumerFulfillmentsPreviewImpl) internalGet(ctx context.Context, reque // attached share or git repo. Only one of these fields will be present. // Personalized installations contain metadata about the attached share or git // repo, as well as the Delta Sharing recipient type. -func (a *consumerFulfillmentsPreviewImpl) List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] { +func (a *consumerFulfillmentsImpl) List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] { getNextPage := func(ctx context.Context, req ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -99,11 +99,11 @@ func (a *consumerFulfillmentsPreviewImpl) List(ctx context.Context, request List // attached share or git repo. Only one of these fields will be present. // Personalized installations contain metadata about the attached share or git // repo, as well as the Delta Sharing recipient type. 
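The paginated Get can also be consumed lazily instead of via GetAll. A minimal sketch, assuming in-package access and the HasNext/Next contract of the SDK's listing.Iterator:

func exampleStreamContent(ctx context.Context, a *ConsumerFulfillmentsAPI, listingId string) error {
	// Pages are fetched on demand; nothing is materialized up front.
	it := a.Get(ctx, GetListingContentMetadataRequest{ListingId: listingId})
	for it.HasNext(ctx) {
		obj, err := it.Next(ctx)
		if err != nil {
			return err
		}
		_ = obj // one SharedDataObject from the current page
	}
	return nil
}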
-func (a *consumerFulfillmentsPreviewImpl) ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) { +func (a *consumerFulfillmentsImpl) ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) { iterator := a.List(ctx, request) return listing.ToSlice[ListingFulfillment](ctx, iterator) } -func (a *consumerFulfillmentsPreviewImpl) internalList(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { +func (a *consumerFulfillmentsImpl) internalList(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { var listFulfillmentsResponse ListFulfillmentsResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/fulfillments", request.ListingId) queryParams := make(map[string]any) @@ -113,12 +113,12 @@ func (a *consumerFulfillmentsPreviewImpl) internalList(ctx context.Context, requ return &listFulfillmentsResponse, err } -// unexported type that holds implementations of just ConsumerInstallationsPreview API methods -type consumerInstallationsPreviewImpl struct { +// unexported type that holds implementations of just ConsumerInstallations API methods +type consumerInstallationsImpl struct { client *client.DatabricksClient } -func (a *consumerInstallationsPreviewImpl) Create(ctx context.Context, request CreateInstallationRequest) (*Installation, error) { +func (a *consumerInstallationsImpl) Create(ctx context.Context, request CreateInstallationRequest) (*Installation, error) { var installation Installation path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations", request.ListingId) queryParams := make(map[string]any) @@ -129,7 +129,7 @@ func (a *consumerInstallationsPreviewImpl) Create(ctx context.Context, request C return &installation, err } -func (a *consumerInstallationsPreviewImpl) Delete(ctx context.Context, request DeleteInstallationRequest) error { +func (a *consumerInstallationsImpl) Delete(ctx context.Context, request DeleteInstallationRequest) error { var deleteInstallationResponse DeleteInstallationResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) queryParams := make(map[string]any) @@ -142,7 +142,7 @@ func (a *consumerInstallationsPreviewImpl) Delete(ctx context.Context, request D // List all installations. // // List all installations across all listings. -func (a *consumerInstallationsPreviewImpl) List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] { +func (a *consumerInstallationsImpl) List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] { getNextPage := func(ctx context.Context, req ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -169,11 +169,11 @@ func (a *consumerInstallationsPreviewImpl) List(ctx context.Context, request Lis // List all installations. // // List all installations across all listings. 
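[Editor's sketch] The fulfillments impl above follows this SDK's standard pagination split: Get returns a lazy listing.Iterator[SharedDataObject], and GetAll drains it through listing.ToSlice. A sketch of consuming both forms, assuming the listing package's usual HasNext(ctx)/Next(ctx) iterator contract (the helper function itself is not part of this patch):

func demoFulfillments(ctx context.Context, a *consumerFulfillmentsImpl, listingId string) error {
	it := a.Get(ctx, GetListingContentMetadataRequest{ListingId: listingId})
	for it.HasNext(ctx) {
		obj, err := it.Next(ctx) // transparently fetches the next page when needed
		if err != nil {
			return err
		}
		_ = obj // one SharedDataObject at a time, without buffering everything
	}
	// Eager form: buffers every element into a slice (this is what GetAll wraps).
	all, err := a.GetAll(ctx, GetListingContentMetadataRequest{ListingId: listingId})
	_ = all
	return err
}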
-func (a *consumerInstallationsPreviewImpl) ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) { +func (a *consumerInstallationsImpl) ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) { iterator := a.List(ctx, request) return listing.ToSlice[InstallationDetail](ctx, iterator) } -func (a *consumerInstallationsPreviewImpl) internalList(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { +func (a *consumerInstallationsImpl) internalList(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { var listAllInstallationsResponse ListAllInstallationsResponse path := "/api/2.1preview/marketplace-consumer/installations" queryParams := make(map[string]any) @@ -186,7 +186,7 @@ func (a *consumerInstallationsPreviewImpl) internalList(ctx context.Context, req // List installations for a listing. // // List all installations for a particular listing. -func (a *consumerInstallationsPreviewImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] { +func (a *consumerInstallationsImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] { getNextPage := func(ctx context.Context, req ListInstallationsRequest) (*ListInstallationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -213,11 +213,11 @@ func (a *consumerInstallationsPreviewImpl) ListListingInstallations(ctx context. // List installations for a listing. // // List all installations for a particular listing. -func (a *consumerInstallationsPreviewImpl) ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) { +func (a *consumerInstallationsImpl) ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) { iterator := a.ListListingInstallations(ctx, request) return listing.ToSlice[InstallationDetail](ctx, iterator) } -func (a *consumerInstallationsPreviewImpl) internalListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { +func (a *consumerInstallationsImpl) internalListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { var listInstallationsResponse ListInstallationsResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations", request.ListingId) queryParams := make(map[string]any) @@ -227,7 +227,7 @@ func (a *consumerInstallationsPreviewImpl) internalListListingInstallations(ctx return &listInstallationsResponse, err } -func (a *consumerInstallationsPreviewImpl) Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) { +func (a *consumerInstallationsImpl) Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) { var updateInstallationResponse UpdateInstallationResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) queryParams := make(map[string]any) @@ -238,12 +238,12 @@ func (a *consumerInstallationsPreviewImpl) Update(ctx context.Context, request U return &updateInstallationResponse, err } -// unexported type that holds implementations of just ConsumerListingsPreview API methods 
-type consumerListingsPreviewImpl struct { +// unexported type that holds implementations of just ConsumerListings API methods +type consumerListingsImpl struct { client *client.DatabricksClient } -func (a *consumerListingsPreviewImpl) BatchGet(ctx context.Context, request BatchGetListingsRequest) (*BatchGetListingsResponse, error) { +func (a *consumerListingsImpl) BatchGet(ctx context.Context, request BatchGetListingsRequest) (*BatchGetListingsResponse, error) { var batchGetListingsResponse BatchGetListingsResponse path := "/api/2.1preview/marketplace-consumer/listings:batchGet" queryParams := make(map[string]any) @@ -253,7 +253,7 @@ func (a *consumerListingsPreviewImpl) BatchGet(ctx context.Context, request Batc return &batchGetListingsResponse, err } -func (a *consumerListingsPreviewImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { +func (a *consumerListingsImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { var getListingResponse GetListingResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v", request.Id) queryParams := make(map[string]any) @@ -267,7 +267,7 @@ func (a *consumerListingsPreviewImpl) Get(ctx context.Context, request GetListin // // List all published listings in the Databricks Marketplace that the consumer // has access to. -func (a *consumerListingsPreviewImpl) List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] { +func (a *consumerListingsImpl) List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] { getNextPage := func(ctx context.Context, req ListListingsRequest) (*ListListingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -295,11 +295,11 @@ func (a *consumerListingsPreviewImpl) List(ctx context.Context, request ListList // // List all published listings in the Databricks Marketplace that the consumer // has access to. -func (a *consumerListingsPreviewImpl) ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) { +func (a *consumerListingsImpl) ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) { iterator := a.List(ctx, request) return listing.ToSlice[Listing](ctx, iterator) } -func (a *consumerListingsPreviewImpl) internalList(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { +func (a *consumerListingsImpl) internalList(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { var listListingsResponse ListListingsResponse path := "/api/2.1preview/marketplace-consumer/listings" queryParams := make(map[string]any) @@ -314,7 +314,7 @@ func (a *consumerListingsPreviewImpl) internalList(ctx context.Context, request // Search published listings in the Databricks Marketplace that the consumer has // access to. This query supports a variety of different search parameters and // performs fuzzy matching. 
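[Editor's sketch] Before the Search rename below: SearchAll is the eager form of this fuzzy search. A sketch; the Query field name is an assumption about the marketplace model, not shown in this hunk:

func demoSearch(ctx context.Context, a *consumerListingsImpl) ([]Listing, error) {
	// Hypothetical: Query is assumed to be the free-text search field.
	return a.SearchAll(ctx, SearchListingsRequest{Query: "weather data"})
}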
-func (a *consumerListingsPreviewImpl) Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] { +func (a *consumerListingsImpl) Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] { getNextPage := func(ctx context.Context, req SearchListingsRequest) (*SearchListingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -343,11 +343,11 @@ func (a *consumerListingsPreviewImpl) Search(ctx context.Context, request Search // Search published listings in the Databricks Marketplace that the consumer has // access to. This query supports a variety of different search parameters and // performs fuzzy matching. -func (a *consumerListingsPreviewImpl) SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) { +func (a *consumerListingsImpl) SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) { iterator := a.Search(ctx, request) return listing.ToSlice[Listing](ctx, iterator) } -func (a *consumerListingsPreviewImpl) internalSearch(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { +func (a *consumerListingsImpl) internalSearch(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { var searchListingsResponse SearchListingsResponse path := "/api/2.1preview/marketplace-consumer/search-listings" queryParams := make(map[string]any) @@ -357,12 +357,12 @@ func (a *consumerListingsPreviewImpl) internalSearch(ctx context.Context, reques return &searchListingsResponse, err } -// unexported type that holds implementations of just ConsumerPersonalizationRequestsPreview API methods -type consumerPersonalizationRequestsPreviewImpl struct { +// unexported type that holds implementations of just ConsumerPersonalizationRequests API methods +type consumerPersonalizationRequestsImpl struct { client *client.DatabricksClient } -func (a *consumerPersonalizationRequestsPreviewImpl) Create(ctx context.Context, request CreatePersonalizationRequest) (*CreatePersonalizationRequestResponse, error) { +func (a *consumerPersonalizationRequestsImpl) Create(ctx context.Context, request CreatePersonalizationRequest) (*CreatePersonalizationRequestResponse, error) { var createPersonalizationRequestResponse CreatePersonalizationRequestResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) queryParams := make(map[string]any) @@ -373,7 +373,7 @@ func (a *consumerPersonalizationRequestsPreviewImpl) Create(ctx context.Context, return &createPersonalizationRequestResponse, err } -func (a *consumerPersonalizationRequestsPreviewImpl) Get(ctx context.Context, request GetPersonalizationRequestRequest) (*GetPersonalizationRequestResponse, error) { +func (a *consumerPersonalizationRequestsImpl) Get(ctx context.Context, request GetPersonalizationRequestRequest) (*GetPersonalizationRequestResponse, error) { var getPersonalizationRequestResponse GetPersonalizationRequestResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) queryParams := make(map[string]any) @@ -386,7 +386,7 @@ func (a *consumerPersonalizationRequestsPreviewImpl) Get(ctx context.Context, re // List all personalization requests. // // List personalization requests for a consumer across all listings. 
-func (a *consumerPersonalizationRequestsPreviewImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { +func (a *consumerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -413,11 +413,11 @@ func (a *consumerPersonalizationRequestsPreviewImpl) List(ctx context.Context, r // List all personalization requests. // // List personalization requests for a consumer across all listings. -func (a *consumerPersonalizationRequestsPreviewImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { +func (a *consumerPersonalizationRequestsImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { iterator := a.List(ctx, request) return listing.ToSlice[PersonalizationRequest](ctx, iterator) } -func (a *consumerPersonalizationRequestsPreviewImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { +func (a *consumerPersonalizationRequestsImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.1preview/marketplace-consumer/personalization-requests" queryParams := make(map[string]any) @@ -427,12 +427,12 @@ func (a *consumerPersonalizationRequestsPreviewImpl) internalList(ctx context.Co return &listAllPersonalizationRequestsResponse, err } -// unexported type that holds implementations of just ConsumerProvidersPreview API methods -type consumerProvidersPreviewImpl struct { +// unexported type that holds implementations of just ConsumerProviders API methods +type consumerProvidersImpl struct { client *client.DatabricksClient } -func (a *consumerProvidersPreviewImpl) BatchGet(ctx context.Context, request BatchGetProvidersRequest) (*BatchGetProvidersResponse, error) { +func (a *consumerProvidersImpl) BatchGet(ctx context.Context, request BatchGetProvidersRequest) (*BatchGetProvidersResponse, error) { var batchGetProvidersResponse BatchGetProvidersResponse path := "/api/2.1preview/marketplace-consumer/providers:batchGet" queryParams := make(map[string]any) @@ -442,7 +442,7 @@ func (a *consumerProvidersPreviewImpl) BatchGet(ctx context.Context, request Bat return &batchGetProvidersResponse, err } -func (a *consumerProvidersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { +func (a *consumerProvidersImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { var getProviderResponse GetProviderResponse path := fmt.Sprintf("/api/2.1preview/marketplace-consumer/providers/%v", request.Id) queryParams := make(map[string]any) @@ -456,7 +456,7 @@ func (a *consumerProvidersPreviewImpl) Get(ctx context.Context, request GetProvi // // List all providers in the Databricks Marketplace with at least one visible // listing. 
-func (a *consumerProvidersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { +func (a *consumerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -484,11 +484,11 @@ func (a *consumerProvidersPreviewImpl) List(ctx context.Context, request ListPro // // List all providers in the Databricks Marketplace with at least one visible // listing. -func (a *consumerProvidersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { +func (a *consumerProvidersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ProviderInfo](ctx, iterator) } -func (a *consumerProvidersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +func (a *consumerProvidersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1preview/marketplace-consumer/providers" queryParams := make(map[string]any) @@ -498,12 +498,12 @@ func (a *consumerProvidersPreviewImpl) internalList(ctx context.Context, request return &listProvidersResponse, err } -// unexported type that holds implementations of just ProviderExchangeFiltersPreview API methods -type providerExchangeFiltersPreviewImpl struct { +// unexported type that holds implementations of just ProviderExchangeFilters API methods +type providerExchangeFiltersImpl struct { client *client.DatabricksClient } -func (a *providerExchangeFiltersPreviewImpl) Create(ctx context.Context, request CreateExchangeFilterRequest) (*CreateExchangeFilterResponse, error) { +func (a *providerExchangeFiltersImpl) Create(ctx context.Context, request CreateExchangeFilterRequest) (*CreateExchangeFilterResponse, error) { var createExchangeFilterResponse CreateExchangeFilterResponse path := "/api/2.0preview/marketplace-exchange/filters" queryParams := make(map[string]any) @@ -514,7 +514,7 @@ func (a *providerExchangeFiltersPreviewImpl) Create(ctx context.Context, request return &createExchangeFilterResponse, err } -func (a *providerExchangeFiltersPreviewImpl) Delete(ctx context.Context, request DeleteExchangeFilterRequest) error { +func (a *providerExchangeFiltersImpl) Delete(ctx context.Context, request DeleteExchangeFilterRequest) error { var deleteExchangeFilterResponse DeleteExchangeFilterResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/filters/%v", request.Id) queryParams := make(map[string]any) @@ -527,7 +527,7 @@ func (a *providerExchangeFiltersPreviewImpl) Delete(ctx context.Context, request // List exchange filters. // // List exchange filter -func (a *providerExchangeFiltersPreviewImpl) List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] { +func (a *providerExchangeFiltersImpl) List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] { getNextPage := func(ctx context.Context, req ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -554,11 +554,11 @@ func (a *providerExchangeFiltersPreviewImpl) List(ctx context.Context, request L // List exchange filters. 
// // List exchange filter -func (a *providerExchangeFiltersPreviewImpl) ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) { +func (a *providerExchangeFiltersImpl) ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) { iterator := a.List(ctx, request) return listing.ToSlice[ExchangeFilter](ctx, iterator) } -func (a *providerExchangeFiltersPreviewImpl) internalList(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { +func (a *providerExchangeFiltersImpl) internalList(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { var listExchangeFiltersResponse ListExchangeFiltersResponse path := "/api/2.0preview/marketplace-exchange/filters" queryParams := make(map[string]any) @@ -568,7 +568,7 @@ func (a *providerExchangeFiltersPreviewImpl) internalList(ctx context.Context, r return &listExchangeFiltersResponse, err } -func (a *providerExchangeFiltersPreviewImpl) Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) { +func (a *providerExchangeFiltersImpl) Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) { var updateExchangeFilterResponse UpdateExchangeFilterResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/filters/%v", request.Id) queryParams := make(map[string]any) @@ -579,12 +579,12 @@ func (a *providerExchangeFiltersPreviewImpl) Update(ctx context.Context, request return &updateExchangeFilterResponse, err } -// unexported type that holds implementations of just ProviderExchangesPreview API methods -type providerExchangesPreviewImpl struct { +// unexported type that holds implementations of just ProviderExchanges API methods +type providerExchangesImpl struct { client *client.DatabricksClient } -func (a *providerExchangesPreviewImpl) AddListingToExchange(ctx context.Context, request AddExchangeForListingRequest) (*AddExchangeForListingResponse, error) { +func (a *providerExchangesImpl) AddListingToExchange(ctx context.Context, request AddExchangeForListingRequest) (*AddExchangeForListingResponse, error) { var addExchangeForListingResponse AddExchangeForListingResponse path := "/api/2.0preview/marketplace-exchange/exchanges-for-listing" queryParams := make(map[string]any) @@ -595,7 +595,7 @@ func (a *providerExchangesPreviewImpl) AddListingToExchange(ctx context.Context, return &addExchangeForListingResponse, err } -func (a *providerExchangesPreviewImpl) Create(ctx context.Context, request CreateExchangeRequest) (*CreateExchangeResponse, error) { +func (a *providerExchangesImpl) Create(ctx context.Context, request CreateExchangeRequest) (*CreateExchangeResponse, error) { var createExchangeResponse CreateExchangeResponse path := "/api/2.0preview/marketplace-exchange/exchanges" queryParams := make(map[string]any) @@ -606,7 +606,7 @@ func (a *providerExchangesPreviewImpl) Create(ctx context.Context, request Creat return &createExchangeResponse, err } -func (a *providerExchangesPreviewImpl) Delete(ctx context.Context, request DeleteExchangeRequest) error { +func (a *providerExchangesImpl) Delete(ctx context.Context, request DeleteExchangeRequest) error { var deleteExchangeResponse DeleteExchangeResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id) queryParams := make(map[string]any) @@ -616,7 +616,7 @@ func (a *providerExchangesPreviewImpl) Delete(ctx context.Context, request 
Delet return err } -func (a *providerExchangesPreviewImpl) DeleteListingFromExchange(ctx context.Context, request RemoveExchangeForListingRequest) error { +func (a *providerExchangesImpl) DeleteListingFromExchange(ctx context.Context, request RemoveExchangeForListingRequest) error { var removeExchangeForListingResponse RemoveExchangeForListingResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges-for-listing/%v", request.Id) queryParams := make(map[string]any) @@ -626,7 +626,7 @@ func (a *providerExchangesPreviewImpl) DeleteListingFromExchange(ctx context.Con return err } -func (a *providerExchangesPreviewImpl) Get(ctx context.Context, request GetExchangeRequest) (*GetExchangeResponse, error) { +func (a *providerExchangesImpl) Get(ctx context.Context, request GetExchangeRequest) (*GetExchangeResponse, error) { var getExchangeResponse GetExchangeResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id) queryParams := make(map[string]any) @@ -639,7 +639,7 @@ func (a *providerExchangesPreviewImpl) Get(ctx context.Context, request GetExcha // List exchanges. // // List exchanges visible to provider -func (a *providerExchangesPreviewImpl) List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] { +func (a *providerExchangesImpl) List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] { getNextPage := func(ctx context.Context, req ListExchangesRequest) (*ListExchangesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -666,11 +666,11 @@ func (a *providerExchangesPreviewImpl) List(ctx context.Context, request ListExc // List exchanges. // // List exchanges visible to provider -func (a *providerExchangesPreviewImpl) ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) { +func (a *providerExchangesImpl) ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) { iterator := a.List(ctx, request) return listing.ToSlice[Exchange](ctx, iterator) } -func (a *providerExchangesPreviewImpl) internalList(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { +func (a *providerExchangesImpl) internalList(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { var listExchangesResponse ListExchangesResponse path := "/api/2.0preview/marketplace-exchange/exchanges" queryParams := make(map[string]any) @@ -683,7 +683,7 @@ func (a *providerExchangesPreviewImpl) internalList(ctx context.Context, request // List exchanges for listing. // // List exchanges associated with a listing -func (a *providerExchangesPreviewImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] { +func (a *providerExchangesImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] { getNextPage := func(ctx context.Context, req ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -710,11 +710,11 @@ func (a *providerExchangesPreviewImpl) ListExchangesForListing(ctx context.Conte // List exchanges for listing. 
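[Editor's sketch] AddListingToExchange and DeleteListingFromExchange above form the attach/detach pair for listing-exchange associations. A sketch; ListingId and ExchangeId on the add request are assumed field names, while Id on the remove request matches the request path visible in this hunk (it identifies the exchange-for-listing association):

func demoExchangeListing(ctx context.Context, a *providerExchangesImpl, listingId, exchangeId, exchangeForListingId string) error {
	resp, err := a.AddListingToExchange(ctx, AddExchangeForListingRequest{
		ListingId:  listingId,  // assumption: field name not shown in this hunk
		ExchangeId: exchangeId, // assumption
	})
	if err != nil {
		return err
	}
	_ = resp
	// Detach again by association id (request.Id appears in the path above).
	return a.DeleteListingFromExchange(ctx, RemoveExchangeForListingRequest{Id: exchangeForListingId})
}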
// // List exchanges associated with a listing -func (a *providerExchangesPreviewImpl) ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) { +func (a *providerExchangesImpl) ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) { iterator := a.ListExchangesForListing(ctx, request) return listing.ToSlice[ExchangeListing](ctx, iterator) } -func (a *providerExchangesPreviewImpl) internalListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { +func (a *providerExchangesImpl) internalListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { var listExchangesForListingResponse ListExchangesForListingResponse path := "/api/2.0preview/marketplace-exchange/exchanges-for-listing" queryParams := make(map[string]any) @@ -727,7 +727,7 @@ func (a *providerExchangesPreviewImpl) internalListExchangesForListing(ctx conte // List listings for exchange. // // List listings associated with an exchange -func (a *providerExchangesPreviewImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] { +func (a *providerExchangesImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] { getNextPage := func(ctx context.Context, req ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -754,11 +754,11 @@ func (a *providerExchangesPreviewImpl) ListListingsForExchange(ctx context.Conte // List listings for exchange. 
// // List listings associated with an exchange -func (a *providerExchangesPreviewImpl) ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) { +func (a *providerExchangesImpl) ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) { iterator := a.ListListingsForExchange(ctx, request) return listing.ToSlice[ExchangeListing](ctx, iterator) } -func (a *providerExchangesPreviewImpl) internalListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { +func (a *providerExchangesImpl) internalListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { var listListingsForExchangeResponse ListListingsForExchangeResponse path := "/api/2.0preview/marketplace-exchange/listings-for-exchange" queryParams := make(map[string]any) @@ -768,7 +768,7 @@ func (a *providerExchangesPreviewImpl) internalListListingsForExchange(ctx conte return &listListingsForExchangeResponse, err } -func (a *providerExchangesPreviewImpl) Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) { +func (a *providerExchangesImpl) Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) { var updateExchangeResponse UpdateExchangeResponse path := fmt.Sprintf("/api/2.0preview/marketplace-exchange/exchanges/%v", request.Id) queryParams := make(map[string]any) @@ -779,12 +779,12 @@ func (a *providerExchangesPreviewImpl) Update(ctx context.Context, request Updat return &updateExchangeResponse, err } -// unexported type that holds implementations of just ProviderFilesPreview API methods -type providerFilesPreviewImpl struct { +// unexported type that holds implementations of just ProviderFiles API methods +type providerFilesImpl struct { client *client.DatabricksClient } -func (a *providerFilesPreviewImpl) Create(ctx context.Context, request CreateFileRequest) (*CreateFileResponse, error) { +func (a *providerFilesImpl) Create(ctx context.Context, request CreateFileRequest) (*CreateFileResponse, error) { var createFileResponse CreateFileResponse path := "/api/2.0preview/marketplace-provider/files" queryParams := make(map[string]any) @@ -795,7 +795,7 @@ func (a *providerFilesPreviewImpl) Create(ctx context.Context, request CreateFil return &createFileResponse, err } -func (a *providerFilesPreviewImpl) Delete(ctx context.Context, request DeleteFileRequest) error { +func (a *providerFilesImpl) Delete(ctx context.Context, request DeleteFileRequest) error { var deleteFileResponse DeleteFileResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/files/%v", request.FileId) queryParams := make(map[string]any) @@ -805,7 +805,7 @@ func (a *providerFilesPreviewImpl) Delete(ctx context.Context, request DeleteFil return err } -func (a *providerFilesPreviewImpl) Get(ctx context.Context, request GetFileRequest) (*GetFileResponse, error) { +func (a *providerFilesImpl) Get(ctx context.Context, request GetFileRequest) (*GetFileResponse, error) { var getFileResponse GetFileResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/files/%v", request.FileId) queryParams := make(map[string]any) @@ -818,7 +818,7 @@ func (a *providerFilesPreviewImpl) Get(ctx context.Context, request GetFileReque // List files. // // List files attached to a parent entity. 
-func (a *providerFilesPreviewImpl) List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] { +func (a *providerFilesImpl) List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] { getNextPage := func(ctx context.Context, req ListFilesRequest) (*ListFilesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -845,11 +845,11 @@ func (a *providerFilesPreviewImpl) List(ctx context.Context, request ListFilesRe // List files. // // List files attached to a parent entity. -func (a *providerFilesPreviewImpl) ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) { +func (a *providerFilesImpl) ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[FileInfo](ctx, iterator) } -func (a *providerFilesPreviewImpl) internalList(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { +func (a *providerFilesImpl) internalList(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { var listFilesResponse ListFilesResponse path := "/api/2.0preview/marketplace-provider/files" queryParams := make(map[string]any) @@ -859,12 +859,12 @@ func (a *providerFilesPreviewImpl) internalList(ctx context.Context, request Lis return &listFilesResponse, err } -// unexported type that holds implementations of just ProviderListingsPreview API methods -type providerListingsPreviewImpl struct { +// unexported type that holds implementations of just ProviderListings API methods +type providerListingsImpl struct { client *client.DatabricksClient } -func (a *providerListingsPreviewImpl) Create(ctx context.Context, request CreateListingRequest) (*CreateListingResponse, error) { +func (a *providerListingsImpl) Create(ctx context.Context, request CreateListingRequest) (*CreateListingResponse, error) { var createListingResponse CreateListingResponse path := "/api/2.0preview/marketplace-provider/listing" queryParams := make(map[string]any) @@ -875,7 +875,7 @@ func (a *providerListingsPreviewImpl) Create(ctx context.Context, request Create return &createListingResponse, err } -func (a *providerListingsPreviewImpl) Delete(ctx context.Context, request DeleteListingRequest) error { +func (a *providerListingsImpl) Delete(ctx context.Context, request DeleteListingRequest) error { var deleteListingResponse DeleteListingResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) queryParams := make(map[string]any) @@ -885,7 +885,7 @@ func (a *providerListingsPreviewImpl) Delete(ctx context.Context, request Delete return err } -func (a *providerListingsPreviewImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { +func (a *providerListingsImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { var getListingResponse GetListingResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) queryParams := make(map[string]any) @@ -898,7 +898,7 @@ func (a *providerListingsPreviewImpl) Get(ctx context.Context, request GetListin // List listings. 
// // List listings owned by this provider -func (a *providerListingsPreviewImpl) List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] { +func (a *providerListingsImpl) List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] { getNextPage := func(ctx context.Context, req GetListingsRequest) (*GetListingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -925,11 +925,11 @@ func (a *providerListingsPreviewImpl) List(ctx context.Context, request GetListi // List listings. // // List listings owned by this provider -func (a *providerListingsPreviewImpl) ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) { +func (a *providerListingsImpl) ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) { iterator := a.List(ctx, request) return listing.ToSlice[Listing](ctx, iterator) } -func (a *providerListingsPreviewImpl) internalList(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { +func (a *providerListingsImpl) internalList(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { var getListingsResponse GetListingsResponse path := "/api/2.0preview/marketplace-provider/listings" queryParams := make(map[string]any) @@ -939,7 +939,7 @@ func (a *providerListingsPreviewImpl) internalList(ctx context.Context, request return &getListingsResponse, err } -func (a *providerListingsPreviewImpl) Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) { +func (a *providerListingsImpl) Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) { var updateListingResponse UpdateListingResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v", request.Id) queryParams := make(map[string]any) @@ -950,8 +950,8 @@ func (a *providerListingsPreviewImpl) Update(ctx context.Context, request Update return &updateListingResponse, err } -// unexported type that holds implementations of just ProviderPersonalizationRequestsPreview API methods -type providerPersonalizationRequestsPreviewImpl struct { +// unexported type that holds implementations of just ProviderPersonalizationRequests API methods +type providerPersonalizationRequestsImpl struct { client *client.DatabricksClient } @@ -959,7 +959,7 @@ type providerPersonalizationRequestsPreviewImpl struct { // // List personalization requests to this provider. This will return all // personalization requests, regardless of which listing they are for. -func (a *providerPersonalizationRequestsPreviewImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { +func (a *providerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -987,11 +987,11 @@ func (a *providerPersonalizationRequestsPreviewImpl) List(ctx context.Context, r // // List personalization requests to this provider. This will return all // personalization requests, regardless of which listing they are for. 
-func (a *providerPersonalizationRequestsPreviewImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { +func (a *providerPersonalizationRequestsImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { iterator := a.List(ctx, request) return listing.ToSlice[PersonalizationRequest](ctx, iterator) } -func (a *providerPersonalizationRequestsPreviewImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { +func (a *providerPersonalizationRequestsImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.0preview/marketplace-provider/personalization-requests" queryParams := make(map[string]any) @@ -1001,7 +1001,7 @@ func (a *providerPersonalizationRequestsPreviewImpl) internalList(ctx context.Co return &listAllPersonalizationRequestsResponse, err } -func (a *providerPersonalizationRequestsPreviewImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { +func (a *providerPersonalizationRequestsImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { var updatePersonalizationRequestResponse UpdatePersonalizationRequestResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/listings/%v/personalization-requests/%v/request-status", request.ListingId, request.RequestId) queryParams := make(map[string]any) @@ -1012,12 +1012,12 @@ func (a *providerPersonalizationRequestsPreviewImpl) Update(ctx context.Context, return &updatePersonalizationRequestResponse, err } -// unexported type that holds implementations of just ProviderProviderAnalyticsDashboardsPreview API methods -type providerProviderAnalyticsDashboardsPreviewImpl struct { +// unexported type that holds implementations of just ProviderProviderAnalyticsDashboards API methods +type providerProviderAnalyticsDashboardsImpl struct { client *client.DatabricksClient } -func (a *providerProviderAnalyticsDashboardsPreviewImpl) Create(ctx context.Context) (*ProviderAnalyticsDashboard, error) { +func (a *providerProviderAnalyticsDashboardsImpl) Create(ctx context.Context) (*ProviderAnalyticsDashboard, error) { var providerAnalyticsDashboard ProviderAnalyticsDashboard path := "/api/2.0preview/marketplace-provider/analytics_dashboard" @@ -1027,7 +1027,7 @@ func (a *providerProviderAnalyticsDashboardsPreviewImpl) Create(ctx context.Cont return &providerAnalyticsDashboard, err } -func (a *providerProviderAnalyticsDashboardsPreviewImpl) Get(ctx context.Context) (*ListProviderAnalyticsDashboardResponse, error) { +func (a *providerProviderAnalyticsDashboardsImpl) Get(ctx context.Context) (*ListProviderAnalyticsDashboardResponse, error) { var listProviderAnalyticsDashboardResponse ListProviderAnalyticsDashboardResponse path := "/api/2.0preview/marketplace-provider/analytics_dashboard" @@ -1037,7 +1037,7 @@ func (a *providerProviderAnalyticsDashboardsPreviewImpl) Get(ctx context.Context return &listProviderAnalyticsDashboardResponse, err } -func (a *providerProviderAnalyticsDashboardsPreviewImpl) GetLatestVersion(ctx context.Context) (*GetLatestVersionProviderAnalyticsDashboardResponse, error) { +func (a 
*providerProviderAnalyticsDashboardsImpl) GetLatestVersion(ctx context.Context) (*GetLatestVersionProviderAnalyticsDashboardResponse, error) { var getLatestVersionProviderAnalyticsDashboardResponse GetLatestVersionProviderAnalyticsDashboardResponse path := "/api/2.0preview/marketplace-provider/analytics_dashboard/latest" @@ -1047,7 +1047,7 @@ func (a *providerProviderAnalyticsDashboardsPreviewImpl) GetLatestVersion(ctx co return &getLatestVersionProviderAnalyticsDashboardResponse, err } -func (a *providerProviderAnalyticsDashboardsPreviewImpl) Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) { +func (a *providerProviderAnalyticsDashboardsImpl) Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) { var updateProviderAnalyticsDashboardResponse UpdateProviderAnalyticsDashboardResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/analytics_dashboard/%v", request.Id) queryParams := make(map[string]any) @@ -1058,12 +1058,12 @@ func (a *providerProviderAnalyticsDashboardsPreviewImpl) Update(ctx context.Cont return &updateProviderAnalyticsDashboardResponse, err } -// unexported type that holds implementations of just ProviderProvidersPreview API methods -type providerProvidersPreviewImpl struct { +// unexported type that holds implementations of just ProviderProviders API methods +type providerProvidersImpl struct { client *client.DatabricksClient } -func (a *providerProvidersPreviewImpl) Create(ctx context.Context, request CreateProviderRequest) (*CreateProviderResponse, error) { +func (a *providerProvidersImpl) Create(ctx context.Context, request CreateProviderRequest) (*CreateProviderResponse, error) { var createProviderResponse CreateProviderResponse path := "/api/2.0preview/marketplace-provider/provider" queryParams := make(map[string]any) @@ -1074,7 +1074,7 @@ func (a *providerProvidersPreviewImpl) Create(ctx context.Context, request Creat return &createProviderResponse, err } -func (a *providerProvidersPreviewImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { +func (a *providerProvidersImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { var deleteProviderResponse DeleteProviderResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) queryParams := make(map[string]any) @@ -1084,7 +1084,7 @@ func (a *providerProvidersPreviewImpl) Delete(ctx context.Context, request Delet return err } -func (a *providerProvidersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { +func (a *providerProvidersImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { var getProviderResponse GetProviderResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) queryParams := make(map[string]any) @@ -1097,7 +1097,7 @@ func (a *providerProvidersPreviewImpl) Get(ctx context.Context, request GetProvi // List providers. // // List provider profiles for account. 
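[Editor's sketch] The analytics-dashboard methods renamed above take no request struct for Create, Get, and GetLatestVersion: the API models a single dashboard per provider. A sketch using exactly the signatures shown in these hunks:

func demoDashboard(ctx context.Context, a *providerProviderAnalyticsDashboardsImpl) error {
	dash, err := a.Create(ctx) // *ProviderAnalyticsDashboard
	if err != nil {
		return err
	}
	_ = dash
	latest, err := a.GetLatestVersion(ctx) // *GetLatestVersionProviderAnalyticsDashboardResponse
	if err != nil {
		return err
	}
	_ = latest
	return nil
}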
-func (a *providerProvidersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { +func (a *providerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1124,11 +1124,11 @@ func (a *providerProvidersPreviewImpl) List(ctx context.Context, request ListPro // List providers. // // List provider profiles for account. -func (a *providerProvidersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { +func (a *providerProvidersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ProviderInfo](ctx, iterator) } -func (a *providerProvidersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +func (a *providerProvidersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.0preview/marketplace-provider/providers" queryParams := make(map[string]any) @@ -1138,7 +1138,7 @@ func (a *providerProvidersPreviewImpl) internalList(ctx context.Context, request return &listProvidersResponse, err } -func (a *providerProvidersPreviewImpl) Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) { +func (a *providerProvidersImpl) Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) { var updateProviderResponse UpdateProviderResponse path := fmt.Sprintf("/api/2.0preview/marketplace-provider/providers/%v", request.Id) queryParams := make(map[string]any) diff --git a/ml/v2preview/api.go b/ml/v2preview/api.go index c4f394496..39b00b0f6 100755 --- a/ml/v2preview/api.go +++ b/ml/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Experiments Preview, Model Registry Preview, etc. +// These APIs allow you to manage Experiments, Model Registry, etc. package mlpreview import ( @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type ExperimentsPreviewInterface interface { +type ExperimentsInterface interface { // Create experiment. // @@ -319,9 +319,9 @@ type ExperimentsPreviewInterface interface { UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) } -func NewExperimentsPreview(client *client.DatabricksClient) *ExperimentsPreviewAPI { - return &ExperimentsPreviewAPI{ - experimentsPreviewImpl: experimentsPreviewImpl{ +func NewExperiments(client *client.DatabricksClient) *ExperimentsAPI { + return &ExperimentsAPI{ + experimentsImpl: experimentsImpl{ client: client, }, } @@ -336,15 +336,15 @@ func NewExperimentsPreview(client *client.DatabricksClient) *ExperimentsPreviewA // Experiments are located in the workspace file tree. You manage experiments // using the same tools you use to manage other workspace objects such as // folders, notebooks, and libraries. -type ExperimentsPreviewAPI struct { - experimentsPreviewImpl +type ExperimentsAPI struct { + experimentsImpl } // Get experiment permission levels. // // Gets the permission levels that a user can have on an object. 
-func (a *ExperimentsPreviewAPI) GetPermissionLevelsByExperimentId(ctx context.Context, experimentId string) (*GetExperimentPermissionLevelsResponse, error) { - return a.experimentsPreviewImpl.GetPermissionLevels(ctx, GetExperimentPermissionLevelsRequest{ +func (a *ExperimentsAPI) GetPermissionLevelsByExperimentId(ctx context.Context, experimentId string) (*GetExperimentPermissionLevelsResponse, error) { + return a.experimentsImpl.GetPermissionLevels(ctx, GetExperimentPermissionLevelsRequest{ ExperimentId: experimentId, }) } @@ -353,13 +353,13 @@ func (a *ExperimentsPreviewAPI) GetPermissionLevelsByExperimentId(ctx context.Co // // Gets the permissions of an experiment. Experiments can inherit permissions // from their root object. -func (a *ExperimentsPreviewAPI) GetPermissionsByExperimentId(ctx context.Context, experimentId string) (*ExperimentPermissions, error) { - return a.experimentsPreviewImpl.GetPermissions(ctx, GetExperimentPermissionsRequest{ +func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, experimentId string) (*ExperimentPermissions, error) { + return a.experimentsImpl.GetPermissions(ctx, GetExperimentPermissionsRequest{ ExperimentId: experimentId, }) } -type ModelRegistryPreviewInterface interface { +type ModelRegistryInterface interface { // Approve transition request. // @@ -638,9 +638,9 @@ type ModelRegistryPreviewInterface interface { UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error } -func NewModelRegistryPreview(client *client.DatabricksClient) *ModelRegistryPreviewAPI { - return &ModelRegistryPreviewAPI{ - modelRegistryPreviewImpl: modelRegistryPreviewImpl{ +func NewModelRegistry(client *client.DatabricksClient) *ModelRegistryAPI { + return &ModelRegistryAPI{ + modelRegistryImpl: modelRegistryImpl{ client: client, }, } @@ -654,15 +654,15 @@ func NewModelRegistryPreview(client *client.DatabricksClient) *ModelRegistryPrev // // The Workspace Model Registry is a centralized model repository and a UI and // set of APIs that enable you to manage the full lifecycle of MLflow Models. -type ModelRegistryPreviewAPI struct { - modelRegistryPreviewImpl +type ModelRegistryAPI struct { + modelRegistryImpl } // Get registered model permission levels. // // Gets the permission levels that a user can have on an object. -func (a *ModelRegistryPreviewAPI) GetPermissionLevelsByRegisteredModelId(ctx context.Context, registeredModelId string) (*GetRegisteredModelPermissionLevelsResponse, error) { - return a.modelRegistryPreviewImpl.GetPermissionLevels(ctx, GetRegisteredModelPermissionLevelsRequest{ +func (a *ModelRegistryAPI) GetPermissionLevelsByRegisteredModelId(ctx context.Context, registeredModelId string) (*GetRegisteredModelPermissionLevelsResponse, error) { + return a.modelRegistryImpl.GetPermissionLevels(ctx, GetRegisteredModelPermissionLevelsRequest{ RegisteredModelId: registeredModelId, }) } @@ -671,8 +671,8 @@ func (a *ModelRegistryPreviewAPI) GetPermissionLevelsByRegisteredModelId(ctx con // // Gets the permissions of a registered model. Registered models can inherit // permissions from their root object. 
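[Editor's sketch] The ByExperimentId/ByRegisteredModelId helpers renamed above are thin sugar over the promoted request-struct methods, as their bodies show. These two calls are equivalent (the experiment id is a placeholder):

func demoPermissionLevels(ctx context.Context, api *ExperimentsAPI) error {
	levels, err := api.GetPermissionLevelsByExperimentId(ctx, "12345") // placeholder id
	if err != nil {
		return err
	}
	_ = levels
	// The identical call, spelled out with the request struct:
	levels, err = api.GetPermissionLevels(ctx, GetExperimentPermissionLevelsRequest{
		ExperimentId: "12345", // placeholder id
	})
	_ = levels
	return err
}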
-func (a *ModelRegistryPreviewAPI) GetPermissionsByRegisteredModelId(ctx context.Context, registeredModelId string) (*RegisteredModelPermissions, error) { - return a.modelRegistryPreviewImpl.GetPermissions(ctx, GetRegisteredModelPermissionsRequest{ +func (a *ModelRegistryAPI) GetPermissionsByRegisteredModelId(ctx context.Context, registeredModelId string) (*RegisteredModelPermissions, error) { + return a.modelRegistryImpl.GetPermissions(ctx, GetRegisteredModelPermissionsRequest{ RegisteredModelId: registeredModelId, }) } diff --git a/ml/v2preview/client.go b/ml/v2preview/client.go index d3b7aadaa..7b6cbdc2e 100755 --- a/ml/v2preview/client.go +++ b/ml/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type ExperimentsPreviewClient struct { - ExperimentsPreviewInterface +type ExperimentsClient struct { + ExperimentsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewExperimentsPreviewClient(cfg *config.Config) (*ExperimentsPreviewClient, error) { +func NewExperimentsClient(cfg *config.Config) (*ExperimentsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewExperimentsPreviewClient(cfg *config.Config) (*ExperimentsPreviewClient, return nil, err } - return &ExperimentsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ExperimentsPreviewInterface: NewExperimentsPreview(databricksClient), + return &ExperimentsClient{ + Config: cfg, + apiClient: apiClient, + ExperimentsInterface: NewExperiments(databricksClient), }, nil } -type ModelRegistryPreviewClient struct { - ModelRegistryPreviewInterface +type ModelRegistryClient struct { + ModelRegistryInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewModelRegistryPreviewClient(cfg *config.Config) (*ModelRegistryPreviewClient, error) { +func NewModelRegistryClient(cfg *config.Config) (*ModelRegistryClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,9 +71,9 @@ func NewModelRegistryPreviewClient(cfg *config.Config) (*ModelRegistryPreviewCli return nil, err } - return &ModelRegistryPreviewClient{ - Config: cfg, - apiClient: apiClient, - ModelRegistryPreviewInterface: NewModelRegistryPreview(databricksClient), + return &ModelRegistryClient{ + Config: cfg, + apiClient: apiClient, + ModelRegistryInterface: NewModelRegistry(databricksClient), }, nil } diff --git a/ml/v2preview/impl.go b/ml/v2preview/impl.go index e750866bf..23784be34 100755 --- a/ml/v2preview/impl.go +++ b/ml/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just ExperimentsPreview API methods -type experimentsPreviewImpl struct { +// unexported type that holds implementations of just Experiments API methods +type experimentsImpl struct { client *client.DatabricksClient } -func (a *experimentsPreviewImpl) CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) { +func (a *experimentsImpl) CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) { var createExperimentResponse CreateExperimentResponse path := "/api/2.0preview/mlflow/experiments/create" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *experimentsPreviewImpl) CreateExperiment(ctx context.Context, request C return &createExperimentResponse, err } -func (a *experimentsPreviewImpl) CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) { 
+func (a *experimentsImpl) CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) { var createRunResponse CreateRunResponse path := "/api/2.0preview/mlflow/runs/create" queryParams := make(map[string]any) @@ -39,7 +39,7 @@ func (a *experimentsPreviewImpl) CreateRun(ctx context.Context, request CreateRu return &createRunResponse, err } -func (a *experimentsPreviewImpl) DeleteExperiment(ctx context.Context, request DeleteExperiment) error { +func (a *experimentsImpl) DeleteExperiment(ctx context.Context, request DeleteExperiment) error { var deleteExperimentResponse DeleteExperimentResponse path := "/api/2.0preview/mlflow/experiments/delete" queryParams := make(map[string]any) @@ -50,7 +50,7 @@ func (a *experimentsPreviewImpl) DeleteExperiment(ctx context.Context, request D return err } -func (a *experimentsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRun) error { +func (a *experimentsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { var deleteRunResponse DeleteRunResponse path := "/api/2.0preview/mlflow/runs/delete" queryParams := make(map[string]any) @@ -61,7 +61,7 @@ func (a *experimentsPreviewImpl) DeleteRun(ctx context.Context, request DeleteRu return err } -func (a *experimentsPreviewImpl) DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) { +func (a *experimentsImpl) DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) { var deleteRunsResponse DeleteRunsResponse path := "/api/2.0preview/mlflow/databricks/runs/delete-runs" queryParams := make(map[string]any) @@ -72,7 +72,7 @@ func (a *experimentsPreviewImpl) DeleteRuns(ctx context.Context, request DeleteR return &deleteRunsResponse, err } -func (a *experimentsPreviewImpl) DeleteTag(ctx context.Context, request DeleteTag) error { +func (a *experimentsImpl) DeleteTag(ctx context.Context, request DeleteTag) error { var deleteTagResponse DeleteTagResponse path := "/api/2.0preview/mlflow/runs/delete-tag" queryParams := make(map[string]any) @@ -83,7 +83,7 @@ func (a *experimentsPreviewImpl) DeleteTag(ctx context.Context, request DeleteTa return err } -func (a *experimentsPreviewImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) { +func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0preview/mlflow/experiments/get-by-name" queryParams := make(map[string]any) @@ -93,7 +93,7 @@ func (a *experimentsPreviewImpl) GetByName(ctx context.Context, request GetByNam return &getExperimentResponse, err } -func (a *experimentsPreviewImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { +func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0preview/mlflow/experiments/get" queryParams := make(map[string]any) @@ -106,7 +106,7 @@ func (a *experimentsPreviewImpl) GetExperiment(ctx context.Context, request GetE // Get history of a given metric within a run. // // Gets a list of all values for the specified metric for a given run. 
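[Editor's sketch] Stepping back to the constructor shown in the client.go hunk above: NewExperimentsClient also accepts an explicit config rather than nil. A sketch; Host and Token are real config.Config fields, but the values and the config import path are placeholders/assumptions:

import (
	"context"
	"os"

	"github.com/databricks/databricks-sdk-go/databricks/config" // assumed path
)

func demoCreateExperiment(ctx context.Context) error {
	mlc, err := NewExperimentsClient(&config.Config{
		Host:  "https://example.cloud.databricks.com", // placeholder workspace URL
		Token: os.Getenv("DATABRICKS_TOKEN"),          // placeholder auth
	})
	if err != nil {
		return err
	}
	_, err = mlc.CreateExperiment(ctx, CreateExperiment{
		Name: "/Users/someone@example.com/demo", // placeholder experiment path
	})
	return err
}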
-func (a *experimentsPreviewImpl) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { +func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { getNextPage := func(ctx context.Context, req GetHistoryRequest) (*GetMetricHistoryResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -133,12 +133,12 @@ func (a *experimentsPreviewImpl) GetHistory(ctx context.Context, request GetHist // Get history of a given metric within a run. // // Gets a list of all values for the specified metric for a given run. -func (a *experimentsPreviewImpl) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { +func (a *experimentsImpl) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { iterator := a.GetHistory(ctx, request) return listing.ToSliceN[Metric, int](ctx, iterator, request.MaxResults) } -func (a *experimentsPreviewImpl) internalGetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { +func (a *experimentsImpl) internalGetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { var getMetricHistoryResponse GetMetricHistoryResponse path := "/api/2.0preview/mlflow/metrics/get-history" queryParams := make(map[string]any) @@ -148,7 +148,7 @@ func (a *experimentsPreviewImpl) internalGetHistory(ctx context.Context, request return &getMetricHistoryResponse, err } -func (a *experimentsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error) { +func (a *experimentsImpl) GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error) { var getExperimentPermissionLevelsResponse GetExperimentPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v/permissionLevels", request.ExperimentId) queryParams := make(map[string]any) @@ -158,7 +158,7 @@ func (a *experimentsPreviewImpl) GetPermissionLevels(ctx context.Context, reques return &getExperimentPermissionLevelsResponse, err } -func (a *experimentsPreviewImpl) GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) (*ExperimentPermissions, error) { +func (a *experimentsImpl) GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) (*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) queryParams := make(map[string]any) @@ -168,7 +168,7 @@ func (a *experimentsPreviewImpl) GetPermissions(ctx context.Context, request Get return &experimentPermissions, err } -func (a *experimentsPreviewImpl) GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) { +func (a *experimentsImpl) GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) { var getRunResponse GetRunResponse path := "/api/2.0preview/mlflow/runs/get" queryParams := make(map[string]any) @@ -187,7 +187,7 @@ func (a *experimentsPreviewImpl) GetRun(ctx context.Context, request GetRunReque // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files // API](/api/workspace/files/listdirectorycontents). 
-func (a *experimentsPreviewImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { +func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { getNextPage := func(ctx context.Context, req ListArtifactsRequest) (*ListArtifactsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -220,11 +220,11 @@ func (a *experimentsPreviewImpl) ListArtifacts(ctx context.Context, request List // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files // API](/api/workspace/files/listdirectorycontents). -func (a *experimentsPreviewImpl) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { +func (a *experimentsImpl) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { iterator := a.ListArtifacts(ctx, request) return listing.ToSlice[FileInfo](ctx, iterator) } -func (a *experimentsPreviewImpl) internalListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { +func (a *experimentsImpl) internalListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { var listArtifactsResponse ListArtifactsResponse path := "/api/2.0preview/mlflow/artifacts/list" queryParams := make(map[string]any) @@ -237,7 +237,7 @@ func (a *experimentsPreviewImpl) internalListArtifacts(ctx context.Context, requ // List experiments. // // Gets a list of all experiments. -func (a *experimentsPreviewImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] { +func (a *experimentsImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] { getNextPage := func(ctx context.Context, req ListExperimentsRequest) (*ListExperimentsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -264,12 +264,12 @@ func (a *experimentsPreviewImpl) ListExperiments(ctx context.Context, request Li // List experiments. // // Gets a list of all experiments. 
-func (a *experimentsPreviewImpl) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { +func (a *experimentsImpl) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { iterator := a.ListExperiments(ctx, request) return listing.ToSliceN[Experiment, int](ctx, iterator, request.MaxResults) } -func (a *experimentsPreviewImpl) internalListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { +func (a *experimentsImpl) internalListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { var listExperimentsResponse ListExperimentsResponse path := "/api/2.0preview/mlflow/experiments/list" queryParams := make(map[string]any) @@ -279,7 +279,7 @@ func (a *experimentsPreviewImpl) internalListExperiments(ctx context.Context, re return &listExperimentsResponse, err } -func (a *experimentsPreviewImpl) LogBatch(ctx context.Context, request LogBatch) error { +func (a *experimentsImpl) LogBatch(ctx context.Context, request LogBatch) error { var logBatchResponse LogBatchResponse path := "/api/2.0preview/mlflow/runs/log-batch" queryParams := make(map[string]any) @@ -290,7 +290,7 @@ func (a *experimentsPreviewImpl) LogBatch(ctx context.Context, request LogBatch) return err } -func (a *experimentsPreviewImpl) LogInputs(ctx context.Context, request LogInputs) error { +func (a *experimentsImpl) LogInputs(ctx context.Context, request LogInputs) error { var logInputsResponse LogInputsResponse path := "/api/2.0preview/mlflow/runs/log-inputs" queryParams := make(map[string]any) @@ -301,7 +301,7 @@ func (a *experimentsPreviewImpl) LogInputs(ctx context.Context, request LogInput return err } -func (a *experimentsPreviewImpl) LogMetric(ctx context.Context, request LogMetric) error { +func (a *experimentsImpl) LogMetric(ctx context.Context, request LogMetric) error { var logMetricResponse LogMetricResponse path := "/api/2.0preview/mlflow/runs/log-metric" queryParams := make(map[string]any) @@ -312,7 +312,7 @@ func (a *experimentsPreviewImpl) LogMetric(ctx context.Context, request LogMetri return err } -func (a *experimentsPreviewImpl) LogModel(ctx context.Context, request LogModel) error { +func (a *experimentsImpl) LogModel(ctx context.Context, request LogModel) error { var logModelResponse LogModelResponse path := "/api/2.0preview/mlflow/runs/log-model" queryParams := make(map[string]any) @@ -323,7 +323,7 @@ func (a *experimentsPreviewImpl) LogModel(ctx context.Context, request LogModel) return err } -func (a *experimentsPreviewImpl) LogParam(ctx context.Context, request LogParam) error { +func (a *experimentsImpl) LogParam(ctx context.Context, request LogParam) error { var logParamResponse LogParamResponse path := "/api/2.0preview/mlflow/runs/log-parameter" queryParams := make(map[string]any) @@ -334,7 +334,7 @@ func (a *experimentsPreviewImpl) LogParam(ctx context.Context, request LogParam) return err } -func (a *experimentsPreviewImpl) RestoreExperiment(ctx context.Context, request RestoreExperiment) error { +func (a *experimentsImpl) RestoreExperiment(ctx context.Context, request RestoreExperiment) error { var restoreExperimentResponse RestoreExperimentResponse path := "/api/2.0preview/mlflow/experiments/restore" queryParams := make(map[string]any) @@ -345,7 +345,7 @@ func (a *experimentsPreviewImpl) RestoreExperiment(ctx context.Context, request return err } -func (a *experimentsPreviewImpl) RestoreRun(ctx context.Context, request 
RestoreRun) error { +func (a *experimentsImpl) RestoreRun(ctx context.Context, request RestoreRun) error { var restoreRunResponse RestoreRunResponse path := "/api/2.0preview/mlflow/runs/restore" queryParams := make(map[string]any) @@ -356,7 +356,7 @@ func (a *experimentsPreviewImpl) RestoreRun(ctx context.Context, request Restore return err } -func (a *experimentsPreviewImpl) RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) { +func (a *experimentsImpl) RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) { var restoreRunsResponse RestoreRunsResponse path := "/api/2.0preview/mlflow/databricks/runs/restore-runs" queryParams := make(map[string]any) @@ -370,7 +370,7 @@ func (a *experimentsPreviewImpl) RestoreRuns(ctx context.Context, request Restor // Search experiments. // // Searches for experiments that satisfy specified search criteria. -func (a *experimentsPreviewImpl) SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] { +func (a *experimentsImpl) SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] { getNextPage := func(ctx context.Context, req SearchExperiments) (*SearchExperimentsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -397,11 +397,11 @@ func (a *experimentsPreviewImpl) SearchExperiments(ctx context.Context, request // Search experiments. // // Searches for experiments that satisfy specified search criteria. -func (a *experimentsPreviewImpl) SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) { +func (a *experimentsImpl) SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) { iterator := a.SearchExperiments(ctx, request) return listing.ToSlice[Experiment](ctx, iterator) } -func (a *experimentsPreviewImpl) internalSearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { +func (a *experimentsImpl) internalSearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { var searchExperimentsResponse SearchExperimentsResponse path := "/api/2.0preview/mlflow/experiments/search" queryParams := make(map[string]any) @@ -417,7 +417,7 @@ func (a *experimentsPreviewImpl) internalSearchExperiments(ctx context.Context, // Searches for runs that satisfy expressions. // // Search expressions can use `mlflowMetric` and `mlflowParam` keys.", -func (a *experimentsPreviewImpl) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { +func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { getNextPage := func(ctx context.Context, req SearchRuns) (*SearchRunsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -446,11 +446,11 @@ func (a *experimentsPreviewImpl) SearchRuns(ctx context.Context, request SearchR // Searches for runs that satisfy expressions. 
// // Search expressions can use `mlflowMetric` and `mlflowParam` keys.", -func (a *experimentsPreviewImpl) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { +func (a *experimentsImpl) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { iterator := a.SearchRuns(ctx, request) return listing.ToSlice[Run](ctx, iterator) } -func (a *experimentsPreviewImpl) internalSearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { +func (a *experimentsImpl) internalSearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { var searchRunsResponse SearchRunsResponse path := "/api/2.0preview/mlflow/runs/search" queryParams := make(map[string]any) @@ -461,7 +461,7 @@ func (a *experimentsPreviewImpl) internalSearchRuns(ctx context.Context, request return &searchRunsResponse, err } -func (a *experimentsPreviewImpl) SetExperimentTag(ctx context.Context, request SetExperimentTag) error { +func (a *experimentsImpl) SetExperimentTag(ctx context.Context, request SetExperimentTag) error { var setExperimentTagResponse SetExperimentTagResponse path := "/api/2.0preview/mlflow/experiments/set-experiment-tag" queryParams := make(map[string]any) @@ -472,7 +472,7 @@ func (a *experimentsPreviewImpl) SetExperimentTag(ctx context.Context, request S return err } -func (a *experimentsPreviewImpl) SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { +func (a *experimentsImpl) SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) queryParams := make(map[string]any) @@ -483,7 +483,7 @@ func (a *experimentsPreviewImpl) SetPermissions(ctx context.Context, request Exp return &experimentPermissions, err } -func (a *experimentsPreviewImpl) SetTag(ctx context.Context, request SetTag) error { +func (a *experimentsImpl) SetTag(ctx context.Context, request SetTag) error { var setTagResponse SetTagResponse path := "/api/2.0preview/mlflow/runs/set-tag" queryParams := make(map[string]any) @@ -494,7 +494,7 @@ func (a *experimentsPreviewImpl) SetTag(ctx context.Context, request SetTag) err return err } -func (a *experimentsPreviewImpl) UpdateExperiment(ctx context.Context, request UpdateExperiment) error { +func (a *experimentsImpl) UpdateExperiment(ctx context.Context, request UpdateExperiment) error { var updateExperimentResponse UpdateExperimentResponse path := "/api/2.0preview/mlflow/experiments/update" queryParams := make(map[string]any) @@ -505,7 +505,7 @@ func (a *experimentsPreviewImpl) UpdateExperiment(ctx context.Context, request U return err } -func (a *experimentsPreviewImpl) UpdatePermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { +func (a *experimentsImpl) UpdatePermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0preview/permissions/experiments/%v", request.ExperimentId) queryParams := make(map[string]any) @@ -516,7 +516,7 @@ func (a *experimentsPreviewImpl) UpdatePermissions(ctx context.Context, request return &experimentPermissions, err } -func (a *experimentsPreviewImpl) UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) { +func (a *experimentsImpl) UpdateRun(ctx context.Context, request 
UpdateRun) (*UpdateRunResponse, error) { var updateRunResponse UpdateRunResponse path := "/api/2.0preview/mlflow/runs/update" queryParams := make(map[string]any) @@ -527,12 +527,12 @@ func (a *experimentsPreviewImpl) UpdateRun(ctx context.Context, request UpdateRu return &updateRunResponse, err } -// unexported type that holds implementations of just ModelRegistryPreview API methods -type modelRegistryPreviewImpl struct { +// unexported type that holds implementations of just ModelRegistry API methods +type modelRegistryImpl struct { client *client.DatabricksClient } -func (a *modelRegistryPreviewImpl) ApproveTransitionRequest(ctx context.Context, request ApproveTransitionRequest) (*ApproveTransitionRequestResponse, error) { +func (a *modelRegistryImpl) ApproveTransitionRequest(ctx context.Context, request ApproveTransitionRequest) (*ApproveTransitionRequestResponse, error) { var approveTransitionRequestResponse ApproveTransitionRequestResponse path := "/api/2.0preview/mlflow/transition-requests/approve" queryParams := make(map[string]any) @@ -543,7 +543,7 @@ func (a *modelRegistryPreviewImpl) ApproveTransitionRequest(ctx context.Context, return &approveTransitionRequestResponse, err } -func (a *modelRegistryPreviewImpl) CreateComment(ctx context.Context, request CreateComment) (*CreateCommentResponse, error) { +func (a *modelRegistryImpl) CreateComment(ctx context.Context, request CreateComment) (*CreateCommentResponse, error) { var createCommentResponse CreateCommentResponse path := "/api/2.0preview/mlflow/comments/create" queryParams := make(map[string]any) @@ -554,7 +554,7 @@ func (a *modelRegistryPreviewImpl) CreateComment(ctx context.Context, request Cr return &createCommentResponse, err } -func (a *modelRegistryPreviewImpl) CreateModel(ctx context.Context, request CreateModelRequest) (*CreateModelResponse, error) { +func (a *modelRegistryImpl) CreateModel(ctx context.Context, request CreateModelRequest) (*CreateModelResponse, error) { var createModelResponse CreateModelResponse path := "/api/2.0preview/mlflow/registered-models/create" queryParams := make(map[string]any) @@ -565,7 +565,7 @@ func (a *modelRegistryPreviewImpl) CreateModel(ctx context.Context, request Crea return &createModelResponse, err } -func (a *modelRegistryPreviewImpl) CreateModelVersion(ctx context.Context, request CreateModelVersionRequest) (*CreateModelVersionResponse, error) { +func (a *modelRegistryImpl) CreateModelVersion(ctx context.Context, request CreateModelVersionRequest) (*CreateModelVersionResponse, error) { var createModelVersionResponse CreateModelVersionResponse path := "/api/2.0preview/mlflow/model-versions/create" queryParams := make(map[string]any) @@ -576,7 +576,7 @@ func (a *modelRegistryPreviewImpl) CreateModelVersion(ctx context.Context, reque return &createModelVersionResponse, err } -func (a *modelRegistryPreviewImpl) CreateTransitionRequest(ctx context.Context, request CreateTransitionRequest) (*CreateTransitionRequestResponse, error) { +func (a *modelRegistryImpl) CreateTransitionRequest(ctx context.Context, request CreateTransitionRequest) (*CreateTransitionRequestResponse, error) { var createTransitionRequestResponse CreateTransitionRequestResponse path := "/api/2.0preview/mlflow/transition-requests/create" queryParams := make(map[string]any) @@ -587,7 +587,7 @@ func (a *modelRegistryPreviewImpl) CreateTransitionRequest(ctx context.Context, return &createTransitionRequestResponse, err } -func (a *modelRegistryPreviewImpl) CreateWebhook(ctx context.Context, request 
CreateRegistryWebhook) (*CreateWebhookResponse, error) { +func (a *modelRegistryImpl) CreateWebhook(ctx context.Context, request CreateRegistryWebhook) (*CreateWebhookResponse, error) { var createWebhookResponse CreateWebhookResponse path := "/api/2.0preview/mlflow/registry-webhooks/create" queryParams := make(map[string]any) @@ -598,7 +598,7 @@ func (a *modelRegistryPreviewImpl) CreateWebhook(ctx context.Context, request Cr return &createWebhookResponse, err } -func (a *modelRegistryPreviewImpl) DeleteComment(ctx context.Context, request DeleteCommentRequest) error { +func (a *modelRegistryImpl) DeleteComment(ctx context.Context, request DeleteCommentRequest) error { var deleteCommentResponse DeleteCommentResponse path := "/api/2.0preview/mlflow/comments/delete" queryParams := make(map[string]any) @@ -608,7 +608,7 @@ func (a *modelRegistryPreviewImpl) DeleteComment(ctx context.Context, request De return err } -func (a *modelRegistryPreviewImpl) DeleteModel(ctx context.Context, request DeleteModelRequest) error { +func (a *modelRegistryImpl) DeleteModel(ctx context.Context, request DeleteModelRequest) error { var deleteModelResponse DeleteModelResponse path := "/api/2.0preview/mlflow/registered-models/delete" queryParams := make(map[string]any) @@ -618,7 +618,7 @@ func (a *modelRegistryPreviewImpl) DeleteModel(ctx context.Context, request Dele return err } -func (a *modelRegistryPreviewImpl) DeleteModelTag(ctx context.Context, request DeleteModelTagRequest) error { +func (a *modelRegistryImpl) DeleteModelTag(ctx context.Context, request DeleteModelTagRequest) error { var deleteModelTagResponse DeleteModelTagResponse path := "/api/2.0preview/mlflow/registered-models/delete-tag" queryParams := make(map[string]any) @@ -628,7 +628,7 @@ func (a *modelRegistryPreviewImpl) DeleteModelTag(ctx context.Context, request D return err } -func (a *modelRegistryPreviewImpl) DeleteModelVersion(ctx context.Context, request DeleteModelVersionRequest) error { +func (a *modelRegistryImpl) DeleteModelVersion(ctx context.Context, request DeleteModelVersionRequest) error { var deleteModelVersionResponse DeleteModelVersionResponse path := "/api/2.0preview/mlflow/model-versions/delete" queryParams := make(map[string]any) @@ -638,7 +638,7 @@ func (a *modelRegistryPreviewImpl) DeleteModelVersion(ctx context.Context, reque return err } -func (a *modelRegistryPreviewImpl) DeleteModelVersionTag(ctx context.Context, request DeleteModelVersionTagRequest) error { +func (a *modelRegistryImpl) DeleteModelVersionTag(ctx context.Context, request DeleteModelVersionTagRequest) error { var deleteModelVersionTagResponse DeleteModelVersionTagResponse path := "/api/2.0preview/mlflow/model-versions/delete-tag" queryParams := make(map[string]any) @@ -648,7 +648,7 @@ func (a *modelRegistryPreviewImpl) DeleteModelVersionTag(ctx context.Context, re return err } -func (a *modelRegistryPreviewImpl) DeleteTransitionRequest(ctx context.Context, request DeleteTransitionRequestRequest) error { +func (a *modelRegistryImpl) DeleteTransitionRequest(ctx context.Context, request DeleteTransitionRequestRequest) error { var deleteTransitionRequestResponse DeleteTransitionRequestResponse path := "/api/2.0preview/mlflow/transition-requests/delete" queryParams := make(map[string]any) @@ -658,7 +658,7 @@ func (a *modelRegistryPreviewImpl) DeleteTransitionRequest(ctx context.Context, return err } -func (a *modelRegistryPreviewImpl) DeleteWebhook(ctx context.Context, request DeleteWebhookRequest) error { +func (a *modelRegistryImpl) DeleteWebhook(ctx 
context.Context, request DeleteWebhookRequest) error { var deleteWebhookResponse DeleteWebhookResponse path := "/api/2.0preview/mlflow/registry-webhooks/delete" queryParams := make(map[string]any) @@ -671,7 +671,7 @@ func (a *modelRegistryPreviewImpl) DeleteWebhook(ctx context.Context, request De // Get the latest version. // // Gets the latest version of a registered model. -func (a *modelRegistryPreviewImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] { +func (a *modelRegistryImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] { getNextPage := func(ctx context.Context, req GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -692,11 +692,11 @@ func (a *modelRegistryPreviewImpl) GetLatestVersions(ctx context.Context, reques // Get the latest version. // // Gets the latest version of a registered model. -func (a *modelRegistryPreviewImpl) GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) { +func (a *modelRegistryImpl) GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) { iterator := a.GetLatestVersions(ctx, request) return listing.ToSlice[ModelVersion](ctx, iterator) } -func (a *modelRegistryPreviewImpl) internalGetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { +func (a *modelRegistryImpl) internalGetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { var getLatestVersionsResponse GetLatestVersionsResponse path := "/api/2.0preview/mlflow/registered-models/get-latest-versions" queryParams := make(map[string]any) @@ -707,7 +707,7 @@ func (a *modelRegistryPreviewImpl) internalGetLatestVersions(ctx context.Context return &getLatestVersionsResponse, err } -func (a *modelRegistryPreviewImpl) GetModel(ctx context.Context, request GetModelRequest) (*GetModelResponse, error) { +func (a *modelRegistryImpl) GetModel(ctx context.Context, request GetModelRequest) (*GetModelResponse, error) { var getModelResponse GetModelResponse path := "/api/2.0preview/mlflow/databricks/registered-models/get" queryParams := make(map[string]any) @@ -717,7 +717,7 @@ func (a *modelRegistryPreviewImpl) GetModel(ctx context.Context, request GetMode return &getModelResponse, err } -func (a *modelRegistryPreviewImpl) GetModelVersion(ctx context.Context, request GetModelVersionRequest) (*GetModelVersionResponse, error) { +func (a *modelRegistryImpl) GetModelVersion(ctx context.Context, request GetModelVersionRequest) (*GetModelVersionResponse, error) { var getModelVersionResponse GetModelVersionResponse path := "/api/2.0preview/mlflow/model-versions/get" queryParams := make(map[string]any) @@ -727,7 +727,7 @@ func (a *modelRegistryPreviewImpl) GetModelVersion(ctx context.Context, request return &getModelVersionResponse, err } -func (a *modelRegistryPreviewImpl) GetModelVersionDownloadUri(ctx context.Context, request GetModelVersionDownloadUriRequest) (*GetModelVersionDownloadUriResponse, error) { +func (a *modelRegistryImpl) GetModelVersionDownloadUri(ctx context.Context, request GetModelVersionDownloadUriRequest) (*GetModelVersionDownloadUriResponse, error) { var getModelVersionDownloadUriResponse GetModelVersionDownloadUriResponse path := "/api/2.0preview/mlflow/model-versions/get-download-uri" queryParams := 
make(map[string]any) @@ -737,7 +737,7 @@ func (a *modelRegistryPreviewImpl) GetModelVersionDownloadUri(ctx context.Contex return &getModelVersionDownloadUriResponse, err } -func (a *modelRegistryPreviewImpl) GetPermissionLevels(ctx context.Context, request GetRegisteredModelPermissionLevelsRequest) (*GetRegisteredModelPermissionLevelsResponse, error) { +func (a *modelRegistryImpl) GetPermissionLevels(ctx context.Context, request GetRegisteredModelPermissionLevelsRequest) (*GetRegisteredModelPermissionLevelsResponse, error) { var getRegisteredModelPermissionLevelsResponse GetRegisteredModelPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v/permissionLevels", request.RegisteredModelId) queryParams := make(map[string]any) @@ -747,7 +747,7 @@ func (a *modelRegistryPreviewImpl) GetPermissionLevels(ctx context.Context, requ return &getRegisteredModelPermissionLevelsResponse, err } -func (a *modelRegistryPreviewImpl) GetPermissions(ctx context.Context, request GetRegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { +func (a *modelRegistryImpl) GetPermissions(ctx context.Context, request GetRegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) queryParams := make(map[string]any) @@ -761,7 +761,7 @@ func (a *modelRegistryPreviewImpl) GetPermissions(ctx context.Context, request G // // Lists all available registered models, up to the limit specified in // __max_results__. -func (a *modelRegistryPreviewImpl) ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] { +func (a *modelRegistryImpl) ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] { getNextPage := func(ctx context.Context, req ListModelsRequest) (*ListModelsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -789,12 +789,12 @@ func (a *modelRegistryPreviewImpl) ListModels(ctx context.Context, request ListM // // Lists all available registered models, up to the limit specified in // __max_results__. -func (a *modelRegistryPreviewImpl) ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) { +func (a *modelRegistryImpl) ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) { iterator := a.ListModels(ctx, request) return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) } -func (a *modelRegistryPreviewImpl) internalListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { +func (a *modelRegistryImpl) internalListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { var listModelsResponse ListModelsResponse path := "/api/2.0preview/mlflow/registered-models/list" queryParams := make(map[string]any) @@ -807,7 +807,7 @@ func (a *modelRegistryPreviewImpl) internalListModels(ctx context.Context, reque // List transition requests. // // Gets a list of all open stage transition requests for the model version. 
-func (a *modelRegistryPreviewImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] { +func (a *modelRegistryImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] { getNextPage := func(ctx context.Context, req ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -828,11 +828,11 @@ func (a *modelRegistryPreviewImpl) ListTransitionRequests(ctx context.Context, r // List transition requests. // // Gets a list of all open stage transition requests for the model version. -func (a *modelRegistryPreviewImpl) ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) { +func (a *modelRegistryImpl) ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) { iterator := a.ListTransitionRequests(ctx, request) return listing.ToSlice[Activity](ctx, iterator) } -func (a *modelRegistryPreviewImpl) internalListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { +func (a *modelRegistryImpl) internalListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { var listTransitionRequestsResponse ListTransitionRequestsResponse path := "/api/2.0preview/mlflow/transition-requests/list" queryParams := make(map[string]any) @@ -847,7 +847,7 @@ func (a *modelRegistryPreviewImpl) internalListTransitionRequests(ctx context.Co // **NOTE:** This endpoint is in Public Preview. // // Lists all registry webhooks. -func (a *modelRegistryPreviewImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] { +func (a *modelRegistryImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] { getNextPage := func(ctx context.Context, req ListWebhooksRequest) (*ListRegistryWebhooks, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -876,11 +876,11 @@ func (a *modelRegistryPreviewImpl) ListWebhooks(ctx context.Context, request Lis // **NOTE:** This endpoint is in Public Preview. // // Lists all registry webhooks. 
-func (a *modelRegistryPreviewImpl) ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) { +func (a *modelRegistryImpl) ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) { iterator := a.ListWebhooks(ctx, request) return listing.ToSlice[RegistryWebhook](ctx, iterator) } -func (a *modelRegistryPreviewImpl) internalListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { +func (a *modelRegistryImpl) internalListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { var listRegistryWebhooks ListRegistryWebhooks path := "/api/2.0preview/mlflow/registry-webhooks/list" queryParams := make(map[string]any) @@ -890,7 +890,7 @@ func (a *modelRegistryPreviewImpl) internalListWebhooks(ctx context.Context, req return &listRegistryWebhooks, err } -func (a *modelRegistryPreviewImpl) RejectTransitionRequest(ctx context.Context, request RejectTransitionRequest) (*RejectTransitionRequestResponse, error) { +func (a *modelRegistryImpl) RejectTransitionRequest(ctx context.Context, request RejectTransitionRequest) (*RejectTransitionRequestResponse, error) { var rejectTransitionRequestResponse RejectTransitionRequestResponse path := "/api/2.0preview/mlflow/transition-requests/reject" queryParams := make(map[string]any) @@ -901,7 +901,7 @@ func (a *modelRegistryPreviewImpl) RejectTransitionRequest(ctx context.Context, return &rejectTransitionRequestResponse, err } -func (a *modelRegistryPreviewImpl) RenameModel(ctx context.Context, request RenameModelRequest) (*RenameModelResponse, error) { +func (a *modelRegistryImpl) RenameModel(ctx context.Context, request RenameModelRequest) (*RenameModelResponse, error) { var renameModelResponse RenameModelResponse path := "/api/2.0preview/mlflow/registered-models/rename" queryParams := make(map[string]any) @@ -915,7 +915,7 @@ func (a *modelRegistryPreviewImpl) RenameModel(ctx context.Context, request Rena // Searches model versions. // // Searches for specific model versions based on the supplied __filter__. -func (a *modelRegistryPreviewImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] { +func (a *modelRegistryImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] { getNextPage := func(ctx context.Context, req SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -942,12 +942,12 @@ func (a *modelRegistryPreviewImpl) SearchModelVersions(ctx context.Context, requ // Searches model versions. // // Searches for specific model versions based on the supplied __filter__. 
-func (a *modelRegistryPreviewImpl) SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) { +func (a *modelRegistryImpl) SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) { iterator := a.SearchModelVersions(ctx, request) return listing.ToSliceN[ModelVersion, int](ctx, iterator, request.MaxResults) } -func (a *modelRegistryPreviewImpl) internalSearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { +func (a *modelRegistryImpl) internalSearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { var searchModelVersionsResponse SearchModelVersionsResponse path := "/api/2.0preview/mlflow/model-versions/search" queryParams := make(map[string]any) @@ -960,7 +960,7 @@ func (a *modelRegistryPreviewImpl) internalSearchModelVersions(ctx context.Conte // Search models. // // Search for registered models based on the specified __filter__. -func (a *modelRegistryPreviewImpl) SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] { +func (a *modelRegistryImpl) SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] { getNextPage := func(ctx context.Context, req SearchModelsRequest) (*SearchModelsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -987,12 +987,12 @@ func (a *modelRegistryPreviewImpl) SearchModels(ctx context.Context, request Sea // Search models. // // Search for registered models based on the specified __filter__. -func (a *modelRegistryPreviewImpl) SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) { +func (a *modelRegistryImpl) SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) { iterator := a.SearchModels(ctx, request) return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) } -func (a *modelRegistryPreviewImpl) internalSearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { +func (a *modelRegistryImpl) internalSearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { var searchModelsResponse SearchModelsResponse path := "/api/2.0preview/mlflow/registered-models/search" queryParams := make(map[string]any) @@ -1002,7 +1002,7 @@ func (a *modelRegistryPreviewImpl) internalSearchModels(ctx context.Context, req return &searchModelsResponse, err } -func (a *modelRegistryPreviewImpl) SetModelTag(ctx context.Context, request SetModelTagRequest) error { +func (a *modelRegistryImpl) SetModelTag(ctx context.Context, request SetModelTagRequest) error { var setModelTagResponse SetModelTagResponse path := "/api/2.0preview/mlflow/registered-models/set-tag" queryParams := make(map[string]any) @@ -1013,7 +1013,7 @@ func (a *modelRegistryPreviewImpl) SetModelTag(ctx context.Context, request SetM return err } -func (a *modelRegistryPreviewImpl) SetModelVersionTag(ctx context.Context, request SetModelVersionTagRequest) error { +func (a *modelRegistryImpl) SetModelVersionTag(ctx context.Context, request SetModelVersionTagRequest) error { var setModelVersionTagResponse SetModelVersionTagResponse path := "/api/2.0preview/mlflow/model-versions/set-tag" queryParams := make(map[string]any) @@ -1024,7 +1024,7 @@ func (a *modelRegistryPreviewImpl) SetModelVersionTag(ctx context.Context, reque return err } -func (a *modelRegistryPreviewImpl) 
SetPermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { +func (a *modelRegistryImpl) SetPermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) queryParams := make(map[string]any) @@ -1035,7 +1035,7 @@ func (a *modelRegistryPreviewImpl) SetPermissions(ctx context.Context, request R return &registeredModelPermissions, err } -func (a *modelRegistryPreviewImpl) TestRegistryWebhook(ctx context.Context, request TestRegistryWebhookRequest) (*TestRegistryWebhookResponse, error) { +func (a *modelRegistryImpl) TestRegistryWebhook(ctx context.Context, request TestRegistryWebhookRequest) (*TestRegistryWebhookResponse, error) { var testRegistryWebhookResponse TestRegistryWebhookResponse path := "/api/2.0preview/mlflow/registry-webhooks/test" queryParams := make(map[string]any) @@ -1046,7 +1046,7 @@ func (a *modelRegistryPreviewImpl) TestRegistryWebhook(ctx context.Context, requ return &testRegistryWebhookResponse, err } -func (a *modelRegistryPreviewImpl) TransitionStage(ctx context.Context, request TransitionModelVersionStageDatabricks) (*TransitionStageResponse, error) { +func (a *modelRegistryImpl) TransitionStage(ctx context.Context, request TransitionModelVersionStageDatabricks) (*TransitionStageResponse, error) { var transitionStageResponse TransitionStageResponse path := "/api/2.0preview/mlflow/databricks/model-versions/transition-stage" queryParams := make(map[string]any) @@ -1057,7 +1057,7 @@ func (a *modelRegistryPreviewImpl) TransitionStage(ctx context.Context, request return &transitionStageResponse, err } -func (a *modelRegistryPreviewImpl) UpdateComment(ctx context.Context, request UpdateComment) (*UpdateCommentResponse, error) { +func (a *modelRegistryImpl) UpdateComment(ctx context.Context, request UpdateComment) (*UpdateCommentResponse, error) { var updateCommentResponse UpdateCommentResponse path := "/api/2.0preview/mlflow/comments/update" queryParams := make(map[string]any) @@ -1068,7 +1068,7 @@ func (a *modelRegistryPreviewImpl) UpdateComment(ctx context.Context, request Up return &updateCommentResponse, err } -func (a *modelRegistryPreviewImpl) UpdateModel(ctx context.Context, request UpdateModelRequest) error { +func (a *modelRegistryImpl) UpdateModel(ctx context.Context, request UpdateModelRequest) error { var updateModelResponse UpdateModelResponse path := "/api/2.0preview/mlflow/registered-models/update" queryParams := make(map[string]any) @@ -1079,7 +1079,7 @@ func (a *modelRegistryPreviewImpl) UpdateModel(ctx context.Context, request Upda return err } -func (a *modelRegistryPreviewImpl) UpdateModelVersion(ctx context.Context, request UpdateModelVersionRequest) error { +func (a *modelRegistryImpl) UpdateModelVersion(ctx context.Context, request UpdateModelVersionRequest) error { var updateModelVersionResponse UpdateModelVersionResponse path := "/api/2.0preview/mlflow/model-versions/update" queryParams := make(map[string]any) @@ -1090,7 +1090,7 @@ func (a *modelRegistryPreviewImpl) UpdateModelVersion(ctx context.Context, reque return err } -func (a *modelRegistryPreviewImpl) UpdatePermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { +func (a *modelRegistryImpl) UpdatePermissions(ctx context.Context, request RegisteredModelPermissionsRequest)
(*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0preview/permissions/registered-models/%v", request.RegisteredModelId) queryParams := make(map[string]any) @@ -1101,7 +1101,7 @@ func (a *modelRegistryPreviewImpl) UpdatePermissions(ctx context.Context, reques return &registeredModelPermissions, err } -func (a *modelRegistryPreviewImpl) UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error { +func (a *modelRegistryImpl) UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error { var updateWebhookResponse UpdateWebhookResponse path := "/api/2.0preview/mlflow/registry-webhooks/update" queryParams := make(map[string]any) diff --git a/oauth2/v2preview/api.go b/oauth2/v2preview/api.go index 1d9e44972..cebe03fb0 100755 --- a/oauth2/v2preview/api.go +++ b/oauth2/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Account Federation Policy Preview, Custom App Integration Preview, O Auth Published Apps Preview, Published App Integration Preview, Service Principal Federation Policy Preview, Service Principal Secrets Preview, etc. +// These APIs allow you to manage Account Federation Policy, Custom App Integration, O Auth Published Apps, Published App Integration, Service Principal Federation Policy, Service Principal Secrets, etc. package oauth2preview import ( @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type AccountFederationPolicyPreviewInterface interface { +type AccountFederationPolicyInterface interface { // Create account federation policy. Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) @@ -41,9 +41,9 @@ type AccountFederationPolicyPreviewInterface interface { Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) } -func NewAccountFederationPolicyPreview(client *client.DatabricksClient) *AccountFederationPolicyPreviewAPI { - return &AccountFederationPolicyPreviewAPI{ - accountFederationPolicyPreviewImpl: accountFederationPolicyPreviewImpl{ +func NewAccountFederationPolicy(client *client.DatabricksClient) *AccountFederationPolicyAPI { + return &AccountFederationPolicyAPI{ + accountFederationPolicyImpl: accountFederationPolicyImpl{ client: client, }, } @@ -99,25 +99,25 @@ func NewAccountFederationPolicyPreview(client *client.DatabricksClient) *Account // federation. // // [SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html -type AccountFederationPolicyPreviewAPI struct { - accountFederationPolicyPreviewImpl +type AccountFederationPolicyAPI struct { + accountFederationPolicyImpl } // Delete account federation policy. -func (a *AccountFederationPolicyPreviewAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { - return a.accountFederationPolicyPreviewImpl.Delete(ctx, DeleteAccountFederationPolicyRequest{ +func (a *AccountFederationPolicyAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.accountFederationPolicyImpl.Delete(ctx, DeleteAccountFederationPolicyRequest{ PolicyId: policyId, }) } // Get account federation policy.
-func (a *AccountFederationPolicyPreviewAPI) GetByPolicyId(ctx context.Context, policyId string) (*FederationPolicy, error) { - return a.accountFederationPolicyPreviewImpl.Get(ctx, GetAccountFederationPolicyRequest{ +func (a *AccountFederationPolicyAPI) GetByPolicyId(ctx context.Context, policyId string) (*FederationPolicy, error) { + return a.accountFederationPolicyImpl.Get(ctx, GetAccountFederationPolicyRequest{ PolicyId: policyId, }) } -type CustomAppIntegrationPreviewInterface interface { +type CustomAppIntegrationInterface interface { // Create Custom OAuth App Integration. // @@ -172,9 +172,9 @@ type CustomAppIntegrationPreviewInterface interface { Update(ctx context.Context, request UpdateCustomAppIntegration) error } -func NewCustomAppIntegrationPreview(client *client.DatabricksClient) *CustomAppIntegrationPreviewAPI { - return &CustomAppIntegrationPreviewAPI{ - customAppIntegrationPreviewImpl: customAppIntegrationPreviewImpl{ +func NewCustomAppIntegration(client *client.DatabricksClient) *CustomAppIntegrationAPI { + return &CustomAppIntegrationAPI{ + customAppIntegrationImpl: customAppIntegrationImpl{ client: client, }, } @@ -183,16 +183,16 @@ func NewCustomAppIntegrationPreview(client *client.DatabricksClient) *CustomAppI // These APIs enable administrators to manage custom OAuth app integrations, // which is required for adding/using Custom OAuth App Integration like Tableau // Cloud for Databricks in AWS cloud. -type CustomAppIntegrationPreviewAPI struct { - customAppIntegrationPreviewImpl +type CustomAppIntegrationAPI struct { + customAppIntegrationImpl } // Delete Custom OAuth App Integration. // // Delete an existing Custom OAuth App Integration. You can retrieve the custom // OAuth app integration via :method:CustomAppIntegration/get. -func (a *CustomAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { - return a.customAppIntegrationPreviewImpl.Delete(ctx, DeleteCustomAppIntegrationRequest{ +func (a *CustomAppIntegrationAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { + return a.customAppIntegrationImpl.Delete(ctx, DeleteCustomAppIntegrationRequest{ IntegrationId: integrationId, }) } @@ -200,13 +200,13 @@ func (a *CustomAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Conte // Get OAuth Custom App Integration. // // Gets the Custom OAuth App Integration for the given integration id. -func (a *CustomAppIntegrationPreviewAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetCustomAppIntegrationOutput, error) { - return a.customAppIntegrationPreviewImpl.Get(ctx, GetCustomAppIntegrationRequest{ +func (a *CustomAppIntegrationAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetCustomAppIntegrationOutput, error) { + return a.customAppIntegrationImpl.Get(ctx, GetCustomAppIntegrationRequest{ IntegrationId: integrationId, }) } -type OAuthPublishedAppsPreviewInterface interface { +type OAuthPublishedAppsInterface interface { // Get all the published OAuth apps. 
// @@ -223,9 +223,9 @@ type OAuthPublishedAppsPreviewInterface interface { ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) } -func NewOAuthPublishedAppsPreview(client *client.DatabricksClient) *OAuthPublishedAppsPreviewAPI { - return &OAuthPublishedAppsPreviewAPI{ - oAuthPublishedAppsPreviewImpl: oAuthPublishedAppsPreviewImpl{ +func NewOAuthPublishedApps(client *client.DatabricksClient) *OAuthPublishedAppsAPI { + return &OAuthPublishedAppsAPI{ + oAuthPublishedAppsImpl: oAuthPublishedAppsImpl{ client: client, }, } @@ -235,11 +235,11 @@ func NewOAuthPublishedAppsPreview(client *client.DatabricksClient) *OAuthPublish // applications in Databricks. Administrators can add the published OAuth // applications to their account through the OAuth Published App Integration // APIs. -type OAuthPublishedAppsPreviewAPI struct { - oAuthPublishedAppsPreviewImpl +type OAuthPublishedAppsAPI struct { + oAuthPublishedAppsImpl } -type PublishedAppIntegrationPreviewInterface interface { +type PublishedAppIntegrationInterface interface { // Create Published OAuth App Integration. // @@ -294,9 +294,9 @@ type PublishedAppIntegrationPreviewInterface interface { Update(ctx context.Context, request UpdatePublishedAppIntegration) error } -func NewPublishedAppIntegrationPreview(client *client.DatabricksClient) *PublishedAppIntegrationPreviewAPI { - return &PublishedAppIntegrationPreviewAPI{ - publishedAppIntegrationPreviewImpl: publishedAppIntegrationPreviewImpl{ +func NewPublishedAppIntegration(client *client.DatabricksClient) *PublishedAppIntegrationAPI { + return &PublishedAppIntegrationAPI{ + publishedAppIntegrationImpl: publishedAppIntegrationImpl{ client: client, }, } @@ -305,16 +305,16 @@ func NewPublishedAppIntegrationPreview(client *client.DatabricksClient) *Publish // These APIs enable administrators to manage published OAuth app integrations, // which is required for adding/using Published OAuth App Integration like // Tableau Desktop for Databricks in AWS cloud. -type PublishedAppIntegrationPreviewAPI struct { - publishedAppIntegrationPreviewImpl +type PublishedAppIntegrationAPI struct { + publishedAppIntegrationImpl } // Delete Published OAuth App Integration. // // Delete an existing Published OAuth App Integration. You can retrieve the // published OAuth app integration via :method:PublishedAppIntegration/get. -func (a *PublishedAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { - return a.publishedAppIntegrationPreviewImpl.Delete(ctx, DeletePublishedAppIntegrationRequest{ +func (a *PublishedAppIntegrationAPI) DeleteByIntegrationId(ctx context.Context, integrationId string) error { + return a.publishedAppIntegrationImpl.Delete(ctx, DeletePublishedAppIntegrationRequest{ IntegrationId: integrationId, }) } @@ -322,13 +322,13 @@ func (a *PublishedAppIntegrationPreviewAPI) DeleteByIntegrationId(ctx context.Co // Get OAuth Published App Integration. // // Gets the Published OAuth App Integration for the given integration id. 
-func (a *PublishedAppIntegrationPreviewAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetPublishedAppIntegrationOutput, error) { - return a.publishedAppIntegrationPreviewImpl.Get(ctx, GetPublishedAppIntegrationRequest{ +func (a *PublishedAppIntegrationAPI) GetByIntegrationId(ctx context.Context, integrationId string) (*GetPublishedAppIntegrationOutput, error) { + return a.publishedAppIntegrationImpl.Get(ctx, GetPublishedAppIntegrationRequest{ IntegrationId: integrationId, }) } -type ServicePrincipalFederationPolicyPreviewInterface interface { +type ServicePrincipalFederationPolicyInterface interface { // Create service principal federation policy. Create(ctx context.Context, request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) @@ -362,9 +362,9 @@ type ServicePrincipalFederationPolicyPreviewInterface interface { Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) } -func NewServicePrincipalFederationPolicyPreview(client *client.DatabricksClient) *ServicePrincipalFederationPolicyPreviewAPI { - return &ServicePrincipalFederationPolicyPreviewAPI{ - servicePrincipalFederationPolicyPreviewImpl: servicePrincipalFederationPolicyPreviewImpl{ +func NewServicePrincipalFederationPolicy(client *client.DatabricksClient) *ServicePrincipalFederationPolicyAPI { + return &ServicePrincipalFederationPolicyAPI{ + servicePrincipalFederationPolicyImpl: servicePrincipalFederationPolicyImpl{ client: client, }, } @@ -424,34 +424,34 @@ func NewServicePrincipalFederationPolicyPreview(client *client.DatabricksClient) // // You do not need to configure an OAuth application in Databricks to use token // federation. -type ServicePrincipalFederationPolicyPreviewAPI struct { - servicePrincipalFederationPolicyPreviewImpl +type ServicePrincipalFederationPolicyAPI struct { + servicePrincipalFederationPolicyImpl } // Delete service principal federation policy. -func (a *ServicePrincipalFederationPolicyPreviewAPI) DeleteByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) error { - return a.servicePrincipalFederationPolicyPreviewImpl.Delete(ctx, DeleteServicePrincipalFederationPolicyRequest{ +func (a *ServicePrincipalFederationPolicyAPI) DeleteByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) error { + return a.servicePrincipalFederationPolicyImpl.Delete(ctx, DeleteServicePrincipalFederationPolicyRequest{ ServicePrincipalId: servicePrincipalId, PolicyId: policyId, }) } // Get service principal federation policy. -func (a *ServicePrincipalFederationPolicyPreviewAPI) GetByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) (*FederationPolicy, error) { - return a.servicePrincipalFederationPolicyPreviewImpl.Get(ctx, GetServicePrincipalFederationPolicyRequest{ +func (a *ServicePrincipalFederationPolicyAPI) GetByServicePrincipalIdAndPolicyId(ctx context.Context, servicePrincipalId int64, policyId string) (*FederationPolicy, error) { + return a.servicePrincipalFederationPolicyImpl.Get(ctx, GetServicePrincipalFederationPolicyRequest{ ServicePrincipalId: servicePrincipalId, PolicyId: policyId, }) } // List service principal federation policies. 
-func (a *ServicePrincipalFederationPolicyPreviewAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListFederationPoliciesResponse, error) { - return a.servicePrincipalFederationPolicyPreviewImpl.internalList(ctx, ListServicePrincipalFederationPoliciesRequest{ +func (a *ServicePrincipalFederationPolicyAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListFederationPoliciesResponse, error) { + return a.servicePrincipalFederationPolicyImpl.internalList(ctx, ListServicePrincipalFederationPoliciesRequest{ ServicePrincipalId: servicePrincipalId, }) } -type ServicePrincipalSecretsPreviewInterface interface { +type ServicePrincipalSecretsInterface interface { // Create service principal secret. // @@ -494,9 +494,9 @@ type ServicePrincipalSecretsPreviewInterface interface { ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) } -func NewServicePrincipalSecretsPreview(client *client.DatabricksClient) *ServicePrincipalSecretsPreviewAPI { - return &ServicePrincipalSecretsPreviewAPI{ - servicePrincipalSecretsPreviewImpl: servicePrincipalSecretsPreviewImpl{ +func NewServicePrincipalSecrets(client *client.DatabricksClient) *ServicePrincipalSecretsAPI { + return &ServicePrincipalSecretsAPI{ + servicePrincipalSecretsImpl: servicePrincipalSecretsImpl{ client: client, }, } @@ -515,15 +515,15 @@ func NewServicePrincipalSecretsPreview(client *client.DatabricksClient) *Service // // [Authentication using OAuth tokens for service principals]: https://docs.databricks.com/dev-tools/authentication-oauth.html // [Databricks Terraform Provider]: https://github.com/databricks/terraform-provider-databricks/blob/master/docs/index.md#authenticating-with-service-principal -type ServicePrincipalSecretsPreviewAPI struct { - servicePrincipalSecretsPreviewImpl +type ServicePrincipalSecretsAPI struct { + servicePrincipalSecretsImpl } // Delete service principal secret. // // Delete a secret from the given service principal. -func (a *ServicePrincipalSecretsPreviewAPI) DeleteByServicePrincipalIdAndSecretId(ctx context.Context, servicePrincipalId int64, secretId string) error { - return a.servicePrincipalSecretsPreviewImpl.Delete(ctx, DeleteServicePrincipalSecretRequest{ +func (a *ServicePrincipalSecretsAPI) DeleteByServicePrincipalIdAndSecretId(ctx context.Context, servicePrincipalId int64, secretId string) error { + return a.servicePrincipalSecretsImpl.Delete(ctx, DeleteServicePrincipalSecretRequest{ ServicePrincipalId: servicePrincipalId, SecretId: secretId, }) @@ -534,8 +534,8 @@ func (a *ServicePrincipalSecretsPreviewAPI) DeleteByServicePrincipalIdAndSecretI // List all secrets associated with the given service principal. This operation // only returns information about the secrets themselves and does not include // the secret values. 
-func (a *ServicePrincipalSecretsPreviewAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) { - return a.servicePrincipalSecretsPreviewImpl.internalList(ctx, ListServicePrincipalSecretsRequest{ +func (a *ServicePrincipalSecretsAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) { + return a.servicePrincipalSecretsImpl.internalList(ctx, ListServicePrincipalSecretsRequest{ ServicePrincipalId: servicePrincipalId, }) } diff --git a/oauth2/v2preview/client.go b/oauth2/v2preview/client.go index fa5d1190c..f2893a84e 100755 --- a/oauth2/v2preview/client.go +++ b/oauth2/v2preview/client.go @@ -9,13 +9,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/config" ) -type AccountFederationPolicyPreviewClient struct { - AccountFederationPolicyPreviewInterface +type AccountFederationPolicyClient struct { + AccountFederationPolicyInterface Config *config.Config } -func NewAccountFederationPolicyPreviewClient(cfg *config.Config) (*AccountFederationPolicyPreviewClient, error) { +func NewAccountFederationPolicyClient(cfg *config.Config) (*AccountFederationPolicyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -33,19 +33,19 @@ func NewAccountFederationPolicyPreviewClient(cfg *config.Config) (*AccountFedera return nil, err } - return &AccountFederationPolicyPreviewClient{ - Config: cfg, - AccountFederationPolicyPreviewInterface: NewAccountFederationPolicyPreview(apiClient), + return &AccountFederationPolicyClient{ + Config: cfg, + AccountFederationPolicyInterface: NewAccountFederationPolicy(apiClient), }, nil } -type CustomAppIntegrationPreviewClient struct { - CustomAppIntegrationPreviewInterface +type CustomAppIntegrationClient struct { + CustomAppIntegrationInterface Config *config.Config } -func NewCustomAppIntegrationPreviewClient(cfg *config.Config) (*CustomAppIntegrationPreviewClient, error) { +func NewCustomAppIntegrationClient(cfg *config.Config) (*CustomAppIntegrationClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -63,19 +63,19 @@ func NewCustomAppIntegrationPreviewClient(cfg *config.Config) (*CustomAppIntegra return nil, err } - return &CustomAppIntegrationPreviewClient{ - Config: cfg, - CustomAppIntegrationPreviewInterface: NewCustomAppIntegrationPreview(apiClient), + return &CustomAppIntegrationClient{ + Config: cfg, + CustomAppIntegrationInterface: NewCustomAppIntegration(apiClient), }, nil } -type OAuthPublishedAppsPreviewClient struct { - OAuthPublishedAppsPreviewInterface +type OAuthPublishedAppsClient struct { + OAuthPublishedAppsInterface Config *config.Config } -func NewOAuthPublishedAppsPreviewClient(cfg *config.Config) (*OAuthPublishedAppsPreviewClient, error) { +func NewOAuthPublishedAppsClient(cfg *config.Config) (*OAuthPublishedAppsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -93,19 +93,19 @@ func NewOAuthPublishedAppsPreviewClient(cfg *config.Config) (*OAuthPublishedApps return nil, err } - return &OAuthPublishedAppsPreviewClient{ - Config: cfg, - OAuthPublishedAppsPreviewInterface: NewOAuthPublishedAppsPreview(apiClient), + return &OAuthPublishedAppsClient{ + Config: cfg, + OAuthPublishedAppsInterface: NewOAuthPublishedApps(apiClient), }, nil } -type PublishedAppIntegrationPreviewClient struct { - PublishedAppIntegrationPreviewInterface +type PublishedAppIntegrationClient struct { + PublishedAppIntegrationInterface Config *config.Config } -func NewPublishedAppIntegrationPreviewClient(cfg 
*config.Config) (*PublishedAppIntegrationPreviewClient, error) { +func NewPublishedAppIntegrationClient(cfg *config.Config) (*PublishedAppIntegrationClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -123,19 +123,19 @@ func NewPublishedAppIntegrationPreviewClient(cfg *config.Config) (*PublishedAppI return nil, err } - return &PublishedAppIntegrationPreviewClient{ - Config: cfg, - PublishedAppIntegrationPreviewInterface: NewPublishedAppIntegrationPreview(apiClient), + return &PublishedAppIntegrationClient{ + Config: cfg, + PublishedAppIntegrationInterface: NewPublishedAppIntegration(apiClient), }, nil } -type ServicePrincipalFederationPolicyPreviewClient struct { - ServicePrincipalFederationPolicyPreviewInterface +type ServicePrincipalFederationPolicyClient struct { + ServicePrincipalFederationPolicyInterface Config *config.Config } -func NewServicePrincipalFederationPolicyPreviewClient(cfg *config.Config) (*ServicePrincipalFederationPolicyPreviewClient, error) { +func NewServicePrincipalFederationPolicyClient(cfg *config.Config) (*ServicePrincipalFederationPolicyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -153,19 +153,19 @@ func NewServicePrincipalFederationPolicyPreviewClient(cfg *config.Config) (*Serv return nil, err } - return &ServicePrincipalFederationPolicyPreviewClient{ + return &ServicePrincipalFederationPolicyClient{ Config: cfg, - ServicePrincipalFederationPolicyPreviewInterface: NewServicePrincipalFederationPolicyPreview(apiClient), + ServicePrincipalFederationPolicyInterface: NewServicePrincipalFederationPolicy(apiClient), }, nil } -type ServicePrincipalSecretsPreviewClient struct { - ServicePrincipalSecretsPreviewInterface +type ServicePrincipalSecretsClient struct { + ServicePrincipalSecretsInterface Config *config.Config } -func NewServicePrincipalSecretsPreviewClient(cfg *config.Config) (*ServicePrincipalSecretsPreviewClient, error) { +func NewServicePrincipalSecretsClient(cfg *config.Config) (*ServicePrincipalSecretsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -183,8 +183,8 @@ func NewServicePrincipalSecretsPreviewClient(cfg *config.Config) (*ServicePrinci return nil, err } - return &ServicePrincipalSecretsPreviewClient{ - Config: cfg, - ServicePrincipalSecretsPreviewInterface: NewServicePrincipalSecretsPreview(apiClient), + return &ServicePrincipalSecretsClient{ + Config: cfg, + ServicePrincipalSecretsInterface: NewServicePrincipalSecrets(apiClient), }, nil } diff --git a/oauth2/v2preview/impl.go b/oauth2/v2preview/impl.go index bc755cdc7..825be3765 100755 --- a/oauth2/v2preview/impl.go +++ b/oauth2/v2preview/impl.go @@ -13,12 +13,12 @@ import ( "golang.org/x/exp/slices" ) -// unexported type that holds implementations of just AccountFederationPolicyPreview API methods -type accountFederationPolicyPreviewImpl struct { +// unexported type that holds implementations of just AccountFederationPolicy API methods +type accountFederationPolicyImpl struct { client *client.DatabricksClient } -func (a *accountFederationPolicyPreviewImpl) Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) { +func (a *accountFederationPolicyImpl) Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -32,7 +32,7 @@ func (a *accountFederationPolicyPreviewImpl) Create(ctx context.Context, 
request return &federationPolicy, err } -func (a *accountFederationPolicyPreviewImpl) Delete(ctx context.Context, request DeleteAccountFederationPolicyRequest) error { +func (a *accountFederationPolicyImpl) Delete(ctx context.Context, request DeleteAccountFederationPolicyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -42,7 +42,7 @@ func (a *accountFederationPolicyPreviewImpl) Delete(ctx context.Context, request return err } -func (a *accountFederationPolicyPreviewImpl) Get(ctx context.Context, request GetAccountFederationPolicyRequest) (*FederationPolicy, error) { +func (a *accountFederationPolicyImpl) Get(ctx context.Context, request GetAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -53,7 +53,7 @@ func (a *accountFederationPolicyPreviewImpl) Get(ctx context.Context, request Ge } // List account federation policies. -func (a *accountFederationPolicyPreviewImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] { +func (a *accountFederationPolicyImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] { getNextPage := func(ctx context.Context, req ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -78,11 +78,11 @@ func (a *accountFederationPolicyPreviewImpl) List(ctx context.Context, request L } // List account federation policies. 
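The generated List returns a lazy iterator, while ListAll in the next hunk drains it through listing.ToSlice. A sketch of consuming the iterator directly, assuming the AccountFederationPolicyClient wiring from client.go above; the Name field is carried over from the GA package:

package main

import (
	"context"
	"fmt"
	"log"

	oauth2preview "github.com/databricks/databricks-sdk-go/oauth2/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	c, err := oauth2preview.NewAccountFederationPolicyClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// the iterator pages through /api/2.0preview/accounts/{id}/federationPolicies on demand
	it := c.List(ctx, oauth2preview.ListAccountFederationPoliciesRequest{})
	for it.HasNext(ctx) {
		policy, err := it.Next(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(policy.Name)
	}
}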
-func (a *accountFederationPolicyPreviewImpl) ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) { +func (a *accountFederationPolicyImpl) ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) { iterator := a.List(ctx, request) return listing.ToSlice[FederationPolicy](ctx, iterator) } -func (a *accountFederationPolicyPreviewImpl) internalList(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { +func (a *accountFederationPolicyImpl) internalList(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -92,7 +92,7 @@ func (a *accountFederationPolicyPreviewImpl) internalList(ctx context.Context, r return &listFederationPoliciesResponse, err } -func (a *accountFederationPolicyPreviewImpl) Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) { +func (a *accountFederationPolicyImpl) Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) @@ -106,12 +106,12 @@ func (a *accountFederationPolicyPreviewImpl) Update(ctx context.Context, request return &federationPolicy, err } -// unexported type that holds implementations of just CustomAppIntegrationPreview API methods -type customAppIntegrationPreviewImpl struct { +// unexported type that holds implementations of just CustomAppIntegration API methods +type customAppIntegrationImpl struct { client *client.DatabricksClient } -func (a *customAppIntegrationPreviewImpl) Create(ctx context.Context, request CreateCustomAppIntegration) (*CreateCustomAppIntegrationOutput, error) { +func (a *customAppIntegrationImpl) Create(ctx context.Context, request CreateCustomAppIntegration) (*CreateCustomAppIntegrationOutput, error) { var createCustomAppIntegrationOutput CreateCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -122,7 +122,7 @@ func (a *customAppIntegrationPreviewImpl) Create(ctx context.Context, request Cr return &createCustomAppIntegrationOutput, err } -func (a *customAppIntegrationPreviewImpl) Delete(ctx context.Context, request DeleteCustomAppIntegrationRequest) error { +func (a *customAppIntegrationImpl) Delete(ctx context.Context, request DeleteCustomAppIntegrationRequest) error { var deleteCustomAppIntegrationOutput DeleteCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -132,7 +132,7 @@ func (a *customAppIntegrationPreviewImpl) Delete(ctx context.Context, request De return err } -func (a *customAppIntegrationPreviewImpl) Get(ctx context.Context, request GetCustomAppIntegrationRequest) (*GetCustomAppIntegrationOutput, error) { +func (a *customAppIntegrationImpl) Get(ctx context.Context, request GetCustomAppIntegrationRequest) (*GetCustomAppIntegrationOutput, error) { var 
getCustomAppIntegrationOutput GetCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -146,7 +146,7 @@ func (a *customAppIntegrationPreviewImpl) Get(ctx context.Context, request GetCu // // Get the list of custom OAuth app integrations for the specified Databricks // account -func (a *customAppIntegrationPreviewImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] { +func (a *customAppIntegrationImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] { getNextPage := func(ctx context.Context, req ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -174,11 +174,11 @@ func (a *customAppIntegrationPreviewImpl) List(ctx context.Context, request List // // Get the list of custom OAuth app integrations for the specified Databricks // account -func (a *customAppIntegrationPreviewImpl) ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) { +func (a *customAppIntegrationImpl) ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) { iterator := a.List(ctx, request) return listing.ToSlice[GetCustomAppIntegrationOutput](ctx, iterator) } -func (a *customAppIntegrationPreviewImpl) internalList(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { +func (a *customAppIntegrationImpl) internalList(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { var getCustomAppIntegrationsOutput GetCustomAppIntegrationsOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -188,7 +188,7 @@ func (a *customAppIntegrationPreviewImpl) internalList(ctx context.Context, requ return &getCustomAppIntegrationsOutput, err } -func (a *customAppIntegrationPreviewImpl) Update(ctx context.Context, request UpdateCustomAppIntegration) error { +func (a *customAppIntegrationImpl) Update(ctx context.Context, request UpdateCustomAppIntegration) error { var updateCustomAppIntegrationOutput UpdateCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -199,15 +199,15 @@ func (a *customAppIntegrationPreviewImpl) Update(ctx context.Context, request Up return err } -// unexported type that holds implementations of just OAuthPublishedAppsPreview API methods -type oAuthPublishedAppsPreviewImpl struct { +// unexported type that holds implementations of just OAuthPublishedApps API methods +type oAuthPublishedAppsImpl struct { client *client.DatabricksClient } // Get all the published OAuth apps. // // Get all the available published OAuth apps in Databricks. 
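OAuthPublishedApps is read-only: the impl below exposes only List, ListAll, and the internal pager over the published-apps endpoint. A sketch of fetching every published app in one call, under the same assumed import; the PublishedAppOutput field names are assumptions carried over from the GA package:

package main

import (
	"context"
	"fmt"
	"log"

	oauth2preview "github.com/databricks/databricks-sdk-go/oauth2/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	c, err := oauth2preview.NewOAuthPublishedAppsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	apps, err := c.ListAll(ctx, oauth2preview.ListOAuthPublishedAppsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, app := range apps {
		// Name and ClientId follow the GA package's PublishedAppOutput; assumptions here
		fmt.Println(app.Name, app.ClientId)
	}
}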
-func (a *oAuthPublishedAppsPreviewImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] { +func (a *oAuthPublishedAppsImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] { getNextPage := func(ctx context.Context, req ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -234,11 +234,11 @@ func (a *oAuthPublishedAppsPreviewImpl) List(ctx context.Context, request ListOA // Get all the published OAuth apps. // // Get all the available published OAuth apps in Databricks. -func (a *oAuthPublishedAppsPreviewImpl) ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) { +func (a *oAuthPublishedAppsImpl) ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) { iterator := a.List(ctx, request) return listing.ToSlice[PublishedAppOutput](ctx, iterator) } -func (a *oAuthPublishedAppsPreviewImpl) internalList(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { +func (a *oAuthPublishedAppsImpl) internalList(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { var getPublishedAppsOutput GetPublishedAppsOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-apps", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -248,12 +248,12 @@ func (a *oAuthPublishedAppsPreviewImpl) internalList(ctx context.Context, reques return &getPublishedAppsOutput, err } -// unexported type that holds implementations of just PublishedAppIntegrationPreview API methods -type publishedAppIntegrationPreviewImpl struct { +// unexported type that holds implementations of just PublishedAppIntegration API methods +type publishedAppIntegrationImpl struct { client *client.DatabricksClient } -func (a *publishedAppIntegrationPreviewImpl) Create(ctx context.Context, request CreatePublishedAppIntegration) (*CreatePublishedAppIntegrationOutput, error) { +func (a *publishedAppIntegrationImpl) Create(ctx context.Context, request CreatePublishedAppIntegration) (*CreatePublishedAppIntegrationOutput, error) { var createPublishedAppIntegrationOutput CreatePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -264,7 +264,7 @@ func (a *publishedAppIntegrationPreviewImpl) Create(ctx context.Context, request return &createPublishedAppIntegrationOutput, err } -func (a *publishedAppIntegrationPreviewImpl) Delete(ctx context.Context, request DeletePublishedAppIntegrationRequest) error { +func (a *publishedAppIntegrationImpl) Delete(ctx context.Context, request DeletePublishedAppIntegrationRequest) error { var deletePublishedAppIntegrationOutput DeletePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -274,7 +274,7 @@ func (a *publishedAppIntegrationPreviewImpl) Delete(ctx context.Context, request return err } -func (a *publishedAppIntegrationPreviewImpl) Get(ctx context.Context, request GetPublishedAppIntegrationRequest) (*GetPublishedAppIntegrationOutput, error) { +func (a *publishedAppIntegrationImpl) Get(ctx context.Context, request GetPublishedAppIntegrationRequest) 
(*GetPublishedAppIntegrationOutput, error) { var getPublishedAppIntegrationOutput GetPublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -288,7 +288,7 @@ func (a *publishedAppIntegrationPreviewImpl) Get(ctx context.Context, request Ge // // Get the list of published OAuth app integrations for the specified Databricks // account -func (a *publishedAppIntegrationPreviewImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] { +func (a *publishedAppIntegrationImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] { getNextPage := func(ctx context.Context, req ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -316,11 +316,11 @@ func (a *publishedAppIntegrationPreviewImpl) List(ctx context.Context, request L // // Get the list of published OAuth app integrations for the specified Databricks // account -func (a *publishedAppIntegrationPreviewImpl) ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) { +func (a *publishedAppIntegrationImpl) ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) { iterator := a.List(ctx, request) return listing.ToSlice[GetPublishedAppIntegrationOutput](ctx, iterator) } -func (a *publishedAppIntegrationPreviewImpl) internalList(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { +func (a *publishedAppIntegrationImpl) internalList(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { var getPublishedAppIntegrationsOutput GetPublishedAppIntegrationsOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -330,7 +330,7 @@ func (a *publishedAppIntegrationPreviewImpl) internalList(ctx context.Context, r return &getPublishedAppIntegrationsOutput, err } -func (a *publishedAppIntegrationPreviewImpl) Update(ctx context.Context, request UpdatePublishedAppIntegration) error { +func (a *publishedAppIntegrationImpl) Update(ctx context.Context, request UpdatePublishedAppIntegration) error { var updatePublishedAppIntegrationOutput UpdatePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0preview/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) queryParams := make(map[string]any) @@ -341,12 +341,12 @@ func (a *publishedAppIntegrationPreviewImpl) Update(ctx context.Context, request return err } -// unexported type that holds implementations of just ServicePrincipalFederationPolicyPreview API methods -type servicePrincipalFederationPolicyPreviewImpl struct { +// unexported type that holds implementations of just ServicePrincipalFederationPolicy API methods +type servicePrincipalFederationPolicyImpl struct { client *client.DatabricksClient } -func (a *servicePrincipalFederationPolicyPreviewImpl) Create(ctx context.Context, request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { +func (a *servicePrincipalFederationPolicyImpl) Create(ctx context.Context, 
request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) @@ -360,7 +360,7 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) Create(ctx context.Context return &federationPolicy, err } -func (a *servicePrincipalFederationPolicyPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalFederationPolicyRequest) error { +func (a *servicePrincipalFederationPolicyImpl) Delete(ctx context.Context, request DeleteServicePrincipalFederationPolicyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) queryParams := make(map[string]any) @@ -370,7 +370,7 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) Delete(ctx context.Context return err } -func (a *servicePrincipalFederationPolicyPreviewImpl) Get(ctx context.Context, request GetServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { +func (a *servicePrincipalFederationPolicyImpl) Get(ctx context.Context, request GetServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) queryParams := make(map[string]any) @@ -381,7 +381,7 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) Get(ctx context.Context, r } // List service principal federation policies. -func (a *servicePrincipalFederationPolicyPreviewImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] { +func (a *servicePrincipalFederationPolicyImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] { getNextPage := func(ctx context.Context, req ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -406,11 +406,11 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) List(ctx context.Context, } // List service principal federation policies. 
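Every route in these preview impls differs from GA only in the version segment: 2.0 becomes 2.0preview, while the account, service principal, and policy placeholders are unchanged. A tiny illustration of the resulting path shape, with all three IDs being placeholders:

package main

import "fmt"

func main() {
	// placeholder account ID, service principal ID, and policy ID
	path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v",
		"abc-123", int64(12345), "policy-1")
	fmt.Println(path)
	// /api/2.0preview/accounts/abc-123/servicePrincipals/12345/federationPolicies/policy-1
}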
-func (a *servicePrincipalFederationPolicyPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) { +func (a *servicePrincipalFederationPolicyImpl) ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) { iterator := a.List(ctx, request) return listing.ToSlice[FederationPolicy](ctx, iterator) } -func (a *servicePrincipalFederationPolicyPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { +func (a *servicePrincipalFederationPolicyImpl) internalList(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) @@ -420,7 +420,7 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) internalList(ctx context.C return &listFederationPoliciesResponse, err } -func (a *servicePrincipalFederationPolicyPreviewImpl) Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { +func (a *servicePrincipalFederationPolicyImpl) Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) queryParams := make(map[string]any) @@ -434,12 +434,12 @@ func (a *servicePrincipalFederationPolicyPreviewImpl) Update(ctx context.Context return &federationPolicy, err } -// unexported type that holds implementations of just ServicePrincipalSecretsPreview API methods -type servicePrincipalSecretsPreviewImpl struct { +// unexported type that holds implementations of just ServicePrincipalSecrets API methods +type servicePrincipalSecretsImpl struct { client *client.DatabricksClient } -func (a *servicePrincipalSecretsPreviewImpl) Create(ctx context.Context, request CreateServicePrincipalSecretRequest) (*CreateServicePrincipalSecretResponse, error) { +func (a *servicePrincipalSecretsImpl) Create(ctx context.Context, request CreateServicePrincipalSecretRequest) (*CreateServicePrincipalSecretResponse, error) { var createServicePrincipalSecretResponse CreateServicePrincipalSecretResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) @@ -449,7 +449,7 @@ func (a *servicePrincipalSecretsPreviewImpl) Create(ctx context.Context, request return &createServicePrincipalSecretResponse, err } -func (a *servicePrincipalSecretsPreviewImpl) Delete(ctx context.Context, request DeleteServicePrincipalSecretRequest) error { +func (a *servicePrincipalSecretsImpl) Delete(ctx context.Context, request DeleteServicePrincipalSecretRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.SecretId) queryParams := make(map[string]any) @@ -463,7 +463,7 @@ func (a *servicePrincipalSecretsPreviewImpl) Delete(ctx context.Context, request // 
List all secrets associated with the given service principal. This operation // only returns information about the secrets themselves and does not include // the secret values. -func (a *servicePrincipalSecretsPreviewImpl) List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] { +func (a *servicePrincipalSecretsImpl) List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] { getNextPage := func(ctx context.Context, req ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -492,11 +492,11 @@ func (a *servicePrincipalSecretsPreviewImpl) List(ctx context.Context, request L // List all secrets associated with the given service principal. This operation // only returns information about the secrets themselves and does not include // the secret values. -func (a *servicePrincipalSecretsPreviewImpl) ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) { +func (a *servicePrincipalSecretsImpl) ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[SecretInfo](ctx, iterator) } -func (a *servicePrincipalSecretsPreviewImpl) internalList(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { +func (a *servicePrincipalSecretsImpl) internalList(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { var listServicePrincipalSecretsResponse ListServicePrincipalSecretsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) diff --git a/pipelines/v2preview/api.go b/pipelines/v2preview/api.go index 83ba4affa..934d81703 100755 --- a/pipelines/v2preview/api.go +++ b/pipelines/v2preview/api.go @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type PipelinesPreviewInterface interface { +type PipelinesInterface interface { // Create a pipeline. // @@ -101,7 +101,7 @@ type PipelinesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) - // PipelineStateInfoNameToPipelineIdMap calls [PipelinesPreviewAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. + // PipelineStateInfoNameToPipelineIdMap calls [PipelinesAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. // // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. // @@ -110,7 +110,7 @@ type PipelinesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error) - // GetByName calls [PipelinesPreviewAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. + // GetByName calls [PipelinesAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. // // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. 
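As the notes above say, the name-to-ID helpers trade memory for convenience: GetByName loads every PipelineStateInfo before matching, so it suits small fleets rather than hot paths. A sketch of a lookup by name; the pipelinespreview package name is an assumption in line with the provisioningpreview naming later in this patch, and the pipeline name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	pipelinespreview "github.com/databricks/databricks-sdk-go/pipelines/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	c, err := pipelinespreview.NewPipelinesClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	// "my-dlt-pipeline" is a placeholder; GetByName errors if the name is missing or duplicated
	info, err := c.GetByName(ctx, "my-dlt-pipeline")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.PipelineId)
}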
// @@ -161,9 +161,9 @@ type PipelinesPreviewInterface interface { UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) } -func NewPipelinesPreview(client *client.DatabricksClient) *PipelinesPreviewAPI { - return &PipelinesPreviewAPI{ - pipelinesPreviewImpl: pipelinesPreviewImpl{ +func NewPipelines(client *client.DatabricksClient) *PipelinesAPI { + return &PipelinesAPI{ + pipelinesImpl: pipelinesImpl{ client: client, }, } @@ -183,22 +183,22 @@ func NewPipelinesPreview(client *client.DatabricksClient) *PipelinesPreviewAPI { // data quality with Delta Live Tables expectations. Expectations allow you to // define expected data quality and specify how to handle records that fail // those expectations. -type PipelinesPreviewAPI struct { - pipelinesPreviewImpl +type PipelinesAPI struct { + pipelinesImpl } // Delete a pipeline. // // Deletes a pipeline. -func (a *PipelinesPreviewAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error { - return a.pipelinesPreviewImpl.Delete(ctx, DeletePipelineRequest{ +func (a *PipelinesAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error { + return a.pipelinesImpl.Delete(ctx, DeletePipelineRequest{ PipelineId: pipelineId, }) } // Get a pipeline. -func (a *PipelinesPreviewAPI) GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error) { - return a.pipelinesPreviewImpl.Get(ctx, GetPipelineRequest{ +func (a *PipelinesAPI) GetByPipelineId(ctx context.Context, pipelineId string) (*GetPipelineResponse, error) { + return a.pipelinesImpl.Get(ctx, GetPipelineRequest{ PipelineId: pipelineId, }) } @@ -206,8 +206,8 @@ func (a *PipelinesPreviewAPI) GetByPipelineId(ctx context.Context, pipelineId st // Get pipeline permission levels. // // Gets the permission levels that a user can have on an object. -func (a *PipelinesPreviewAPI) GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error) { - return a.pipelinesPreviewImpl.GetPermissionLevels(ctx, GetPipelinePermissionLevelsRequest{ +func (a *PipelinesAPI) GetPermissionLevelsByPipelineId(ctx context.Context, pipelineId string) (*GetPipelinePermissionLevelsResponse, error) { + return a.pipelinesImpl.GetPermissionLevels(ctx, GetPipelinePermissionLevelsRequest{ PipelineId: pipelineId, }) } @@ -216,8 +216,8 @@ func (a *PipelinesPreviewAPI) GetPermissionLevelsByPipelineId(ctx context.Contex // // Gets the permissions of a pipeline. Pipelines can inherit permissions from // their root object. -func (a *PipelinesPreviewAPI) GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error) { - return a.pipelinesPreviewImpl.GetPermissions(ctx, GetPipelinePermissionsRequest{ +func (a *PipelinesAPI) GetPermissionsByPipelineId(ctx context.Context, pipelineId string) (*PipelinePermissions, error) { + return a.pipelinesImpl.GetPermissions(ctx, GetPipelinePermissionsRequest{ PipelineId: pipelineId, }) } @@ -225,8 +225,8 @@ func (a *PipelinesPreviewAPI) GetPermissionsByPipelineId(ctx context.Context, pi // Get a pipeline update. // // Gets an update from an active pipeline. 
-func (a *PipelinesPreviewAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error) { - return a.pipelinesPreviewImpl.GetUpdate(ctx, GetUpdateRequest{ +func (a *PipelinesAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pipelineId string, updateId string) (*GetUpdateResponse, error) { + return a.pipelinesImpl.GetUpdate(ctx, GetUpdateRequest{ PipelineId: pipelineId, UpdateId: updateId, }) @@ -235,20 +235,20 @@ func (a *PipelinesPreviewAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Conte // List pipeline events. // // Retrieves events for a pipeline. -func (a *PipelinesPreviewAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) { - return a.pipelinesPreviewImpl.internalListPipelineEvents(ctx, ListPipelineEventsRequest{ +func (a *PipelinesAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) { + return a.pipelinesImpl.internalListPipelineEvents(ctx, ListPipelineEventsRequest{ PipelineId: pipelineId, }) } -// PipelineStateInfoNameToPipelineIdMap calls [PipelinesPreviewAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. +// PipelineStateInfoNameToPipelineIdMap calls [PipelinesAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. // // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. // // Note: All [PipelineStateInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *PipelinesPreviewAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error) { +func (a *PipelinesAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.Context, request ListPipelinesRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListPipelinesAll(ctx, request) @@ -266,14 +266,14 @@ func (a *PipelinesPreviewAPI) PipelineStateInfoNameToPipelineIdMap(ctx context.C return mapping, nil } -// GetByName calls [PipelinesPreviewAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. +// GetByName calls [PipelinesAPI.PipelineStateInfoNameToPipelineIdMap] and returns a single [PipelineStateInfo]. // // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. // // Note: All [PipelineStateInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *PipelinesPreviewAPI) GetByName(ctx context.Context, name string) (*PipelineStateInfo, error) { +func (a *PipelinesAPI) GetByName(ctx context.Context, name string) (*PipelineStateInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListPipelinesAll(ctx, ListPipelinesRequest{}) if err != nil { @@ -297,8 +297,8 @@ func (a *PipelinesPreviewAPI) GetByName(ctx context.Context, name string) (*Pipe // List pipeline updates. // // List updates for an active pipeline. 
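The ByPipelineId wrappers are thin sugar over the request structs, handy when an ID is all you have. A sketch chaining two of them, with a placeholder pipeline ID and the same assumed import as above; the Name and Updates field names mirror the GA jobs-adjacent pipelines package and are assumptions here:

package main

import (
	"context"
	"fmt"
	"log"

	pipelinespreview "github.com/databricks/databricks-sdk-go/pipelines/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	c, err := pipelinespreview.NewPipelinesClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	const id = "1234-abcd" // placeholder pipeline ID
	p, err := c.GetByPipelineId(ctx, id)
	if err != nil {
		log.Fatal(err)
	}
	updates, err := c.ListUpdatesByPipelineId(ctx, id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s has %d updates\n", p.Name, len(updates.Updates))
}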
-func (a *PipelinesPreviewAPI) ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error) { - return a.pipelinesPreviewImpl.ListUpdates(ctx, ListUpdatesRequest{ +func (a *PipelinesAPI) ListUpdatesByPipelineId(ctx context.Context, pipelineId string) (*ListUpdatesResponse, error) { + return a.pipelinesImpl.ListUpdates(ctx, ListUpdatesRequest{ PipelineId: pipelineId, }) } diff --git a/pipelines/v2preview/client.go b/pipelines/v2preview/client.go index f3cb34b01..b449f58bc 100755 --- a/pipelines/v2preview/client.go +++ b/pipelines/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type PipelinesPreviewClient struct { - PipelinesPreviewInterface +type PipelinesClient struct { + PipelinesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewPipelinesPreviewClient(cfg *config.Config) (*PipelinesPreviewClient, error) { +func NewPipelinesClient(cfg *config.Config) (*PipelinesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,9 +37,9 @@ func NewPipelinesPreviewClient(cfg *config.Config) (*PipelinesPreviewClient, err return nil, err } - return &PipelinesPreviewClient{ - Config: cfg, - apiClient: apiClient, - PipelinesPreviewInterface: NewPipelinesPreview(databricksClient), + return &PipelinesClient{ + Config: cfg, + apiClient: apiClient, + PipelinesInterface: NewPipelines(databricksClient), }, nil } diff --git a/pipelines/v2preview/impl.go b/pipelines/v2preview/impl.go index 28ddfcf8c..37f649082 100755 --- a/pipelines/v2preview/impl.go +++ b/pipelines/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just PipelinesPreview API methods -type pipelinesPreviewImpl struct { +// unexported type that holds implementations of just Pipelines API methods +type pipelinesImpl struct { client *client.DatabricksClient } -func (a *pipelinesPreviewImpl) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) { +func (a *pipelinesImpl) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) { var createPipelineResponse CreatePipelineResponse path := "/api/2.0preview/pipelines" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *pipelinesPreviewImpl) Create(ctx context.Context, request CreatePipelin return &createPipelineResponse, err } -func (a *pipelinesPreviewImpl) Delete(ctx context.Context, request DeletePipelineRequest) error { +func (a *pipelinesImpl) Delete(ctx context.Context, request DeletePipelineRequest) error { var deletePipelineResponse DeletePipelineResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *pipelinesPreviewImpl) Delete(ctx context.Context, request DeletePipelin return err } -func (a *pipelinesPreviewImpl) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error) { +func (a *pipelinesImpl) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error) { var getPipelineResponse GetPipelineResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) @@ -48,7 +48,7 @@ func (a *pipelinesPreviewImpl) Get(ctx context.Context, request GetPipelineReque return &getPipelineResponse, err } -func (a *pipelinesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) 
(*GetPipelinePermissionLevelsResponse, error) { +func (a *pipelinesImpl) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error) { var getPipelinePermissionLevelsResponse GetPipelinePermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v/permissionLevels", request.PipelineId) queryParams := make(map[string]any) @@ -58,7 +58,7 @@ func (a *pipelinesPreviewImpl) GetPermissionLevels(ctx context.Context, request return &getPipelinePermissionLevelsResponse, err } -func (a *pipelinesPreviewImpl) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error) { +func (a *pipelinesImpl) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) @@ -68,7 +68,7 @@ func (a *pipelinesPreviewImpl) GetPermissions(ctx context.Context, request GetPi return &pipelinePermissions, err } -func (a *pipelinesPreviewImpl) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error) { +func (a *pipelinesImpl) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error) { var getUpdateResponse GetUpdateResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates/%v", request.PipelineId, request.UpdateId) queryParams := make(map[string]any) @@ -81,7 +81,7 @@ func (a *pipelinesPreviewImpl) GetUpdate(ctx context.Context, request GetUpdateR // List pipeline events. // // Retrieves events for a pipeline. -func (a *pipelinesPreviewImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] { +func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] { getNextPage := func(ctx context.Context, req ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -108,12 +108,12 @@ func (a *pipelinesPreviewImpl) ListPipelineEvents(ctx context.Context, request L // List pipeline events. // // Retrieves events for a pipeline. -func (a *pipelinesPreviewImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { +func (a *pipelinesImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { iterator := a.ListPipelineEvents(ctx, request) return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) } -func (a *pipelinesPreviewImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { +func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { var listPipelineEventsResponse ListPipelineEventsResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v/events", request.PipelineId) queryParams := make(map[string]any) @@ -126,7 +126,7 @@ func (a *pipelinesPreviewImpl) internalListPipelineEvents(ctx context.Context, r // List pipelines. // // Lists pipelines defined in the Delta Live Tables system. 
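Note that ListPipelineEventsAll above caps the drain with ToSliceN using request.MaxResults, unlike the unbounded ToSlice used elsewhere in this patch; iterating manually avoids buffering entirely. A sketch under the same assumed import, with a placeholder pipeline ID and event field names carried over from the GA package:

package main

import (
	"context"
	"fmt"
	"log"

	pipelinespreview "github.com/databricks/databricks-sdk-go/pipelines/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	c, err := pipelinespreview.NewPipelinesClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	it := c.ListPipelineEvents(ctx, pipelinespreview.ListPipelineEventsRequest{
		PipelineId: "1234-abcd", // placeholder
	})
	for it.HasNext(ctx) {
		e, err := it.Next(ctx)
		if err != nil {
			log.Fatal(err)
		}
		// EventType and Message follow the GA pipelines package; assumptions here
		fmt.Println(e.EventType, e.Message)
	}
}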
-func (a *pipelinesPreviewImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { +func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -153,12 +153,12 @@ func (a *pipelinesPreviewImpl) ListPipelines(ctx context.Context, request ListPi // List pipelines. // // Lists pipelines defined in the Delta Live Tables system. -func (a *pipelinesPreviewImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { +func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { iterator := a.ListPipelines(ctx, request) return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) } -func (a *pipelinesPreviewImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { +func (a *pipelinesImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { var listPipelinesResponse ListPipelinesResponse path := "/api/2.0preview/pipelines" queryParams := make(map[string]any) @@ -168,7 +168,7 @@ func (a *pipelinesPreviewImpl) internalListPipelines(ctx context.Context, reques return &listPipelinesResponse, err } -func (a *pipelinesPreviewImpl) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error) { +func (a *pipelinesImpl) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error) { var listUpdatesResponse ListUpdatesResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates", request.PipelineId) queryParams := make(map[string]any) @@ -178,7 +178,7 @@ func (a *pipelinesPreviewImpl) ListUpdates(ctx context.Context, request ListUpda return &listUpdatesResponse, err } -func (a *pipelinesPreviewImpl) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { +func (a *pipelinesImpl) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) @@ -189,7 +189,7 @@ func (a *pipelinesPreviewImpl) SetPermissions(ctx context.Context, request Pipel return &pipelinePermissions, err } -func (a *pipelinesPreviewImpl) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error) { +func (a *pipelinesImpl) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error) { var startUpdateResponse StartUpdateResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v/updates", request.PipelineId) queryParams := make(map[string]any) @@ -200,7 +200,7 @@ func (a *pipelinesPreviewImpl) StartUpdate(ctx context.Context, request StartUpd return &startUpdateResponse, err } -func (a *pipelinesPreviewImpl) Stop(ctx context.Context, request StopRequest) error { +func (a *pipelinesImpl) Stop(ctx context.Context, request StopRequest) error { var stopPipelineResponse StopPipelineResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v/stop", request.PipelineId) queryParams := make(map[string]any) @@ -210,7 +210,7 @@ func (a *pipelinesPreviewImpl) Stop(ctx context.Context, 
request StopRequest) er return err } -func (a *pipelinesPreviewImpl) Update(ctx context.Context, request EditPipeline) error { +func (a *pipelinesImpl) Update(ctx context.Context, request EditPipeline) error { var editPipelineResponse EditPipelineResponse path := fmt.Sprintf("/api/2.0preview/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) @@ -221,7 +221,7 @@ func (a *pipelinesPreviewImpl) Update(ctx context.Context, request EditPipeline) return err } -func (a *pipelinesPreviewImpl) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { +func (a *pipelinesImpl) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0preview/permissions/pipelines/%v", request.PipelineId) queryParams := make(map[string]any) diff --git a/provisioning/v2preview/api.go b/provisioning/v2preview/api.go index dc71f5ff8..250217ac4 100755 --- a/provisioning/v2preview/api.go +++ b/provisioning/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Credentials Preview, Encryption Keys Preview, Networks Preview, Private Access Preview, Storage Preview, Vpc Endpoints Preview, Workspaces Preview, etc. +// These APIs allow you to manage Credentials, Encryption Keys, Networks, Private Access, Storage, Vpc Endpoints, Workspaces, etc. package provisioningpreview import ( @@ -11,7 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type CredentialsPreviewInterface interface { +type CredentialsInterface interface { // Create credential configuration. // @@ -63,7 +63,7 @@ type CredentialsPreviewInterface interface { // specified by ID. List(ctx context.Context) ([]Credential, error) - // CredentialCredentialsNameToCredentialsIdMap calls [CredentialsPreviewAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. + // CredentialCredentialsNameToCredentialsIdMap calls [CredentialsAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. // // Returns an error if there's more than one [Credential] with the same .CredentialsName. // @@ -72,7 +72,7 @@ type CredentialsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error) - // GetByCredentialsName calls [CredentialsPreviewAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. + // GetByCredentialsName calls [CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. // // Returns an error if there's more than one [Credential] with the same .CredentialsName. 
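provisioningpreview follows the same rename pattern. Assuming its client.go gains a NewCredentialsClient matching the oauth2 and pipelines constructors shown above (the constructor is not visible in this hunk), a lookup by configuration name reads:

package main

import (
	"context"
	"fmt"
	"log"

	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	// NewCredentialsClient is assumed by analogy with the other preview clients
	c, err := provisioningpreview.NewCredentialsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	cred, err := c.GetByCredentialsName(ctx, "my-credentials") // placeholder name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cred.CredentialsId)
}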
// @@ -82,9 +82,9 @@ type CredentialsPreviewInterface interface { GetByCredentialsName(ctx context.Context, name string) (*Credential, error) } -func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewAPI { - return &CredentialsPreviewAPI{ - credentialsPreviewImpl: credentialsPreviewImpl{ +func NewCredentials(client *client.DatabricksClient) *CredentialsAPI { + return &CredentialsAPI{ + credentialsImpl: credentialsImpl{ client: client, }, } @@ -95,8 +95,8 @@ func NewCredentialsPreview(client *client.DatabricksClient) *CredentialsPreviewA // Databricks can deploy clusters in the appropriate VPC for the new workspace. // A credential configuration encapsulates this role information, and its ID is // used when creating a new workspace. -type CredentialsPreviewAPI struct { - credentialsPreviewImpl +type CredentialsAPI struct { + credentialsImpl } // Delete credential configuration. @@ -104,8 +104,8 @@ type CredentialsPreviewAPI struct { // Deletes a Databricks credential configuration object for an account, both // specified by ID. You cannot delete a credential that is associated with any // workspace. -func (a *CredentialsPreviewAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error { - return a.credentialsPreviewImpl.Delete(ctx, DeleteCredentialRequest{ +func (a *CredentialsAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) error { + return a.credentialsImpl.Delete(ctx, DeleteCredentialRequest{ CredentialsId: credentialsId, }) } @@ -114,20 +114,20 @@ func (a *CredentialsPreviewAPI) DeleteByCredentialsId(ctx context.Context, crede // // Gets a Databricks credential configuration object for an account, both // specified by ID. -func (a *CredentialsPreviewAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error) { - return a.credentialsPreviewImpl.Get(ctx, GetCredentialRequest{ +func (a *CredentialsAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error) { + return a.credentialsImpl.Get(ctx, GetCredentialRequest{ CredentialsId: credentialsId, }) } -// CredentialCredentialsNameToCredentialsIdMap calls [CredentialsPreviewAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. +// CredentialCredentialsNameToCredentialsIdMap calls [CredentialsAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value. // // Returns an error if there's more than one [Credential] with the same .CredentialsName. // // Note: All [Credential] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *CredentialsPreviewAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error) { +func (a *CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -145,14 +145,14 @@ func (a *CredentialsPreviewAPI) CredentialCredentialsNameToCredentialsIdMap(ctx return mapping, nil } -// GetByCredentialsName calls [CredentialsPreviewAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. +// GetByCredentialsName calls [CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential]. 
// // Returns an error if there's more than one [Credential] with the same .CredentialsName. // // Note: All [Credential] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *CredentialsPreviewAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error) { +func (a *CredentialsAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -173,7 +173,7 @@ func (a *CredentialsPreviewAPI) GetByCredentialsName(ctx context.Context, name s return &alternatives[0], nil } -type EncryptionKeysPreviewInterface interface { +type EncryptionKeysInterface interface { // Create encryption key configuration. // @@ -264,9 +264,9 @@ type EncryptionKeysPreviewInterface interface { List(ctx context.Context) ([]CustomerManagedKey, error) } -func NewEncryptionKeysPreview(client *client.DatabricksClient) *EncryptionKeysPreviewAPI { - return &EncryptionKeysPreviewAPI{ - encryptionKeysPreviewImpl: encryptionKeysPreviewImpl{ +func NewEncryptionKeys(client *client.DatabricksClient) *EncryptionKeysAPI { + return &EncryptionKeysAPI{ + encryptionKeysImpl: encryptionKeysImpl{ client: client, }, } @@ -288,16 +288,16 @@ func NewEncryptionKeysPreview(client *client.DatabricksClient) *EncryptionKeysPr // encryption requires that the workspace is on the E2 version of the platform. // If you have an older workspace, it might not be on the E2 version of the // platform. If you are not sure, contact your Databricks representative. -type EncryptionKeysPreviewAPI struct { - encryptionKeysPreviewImpl +type EncryptionKeysAPI struct { + encryptionKeysImpl } // Delete encryption key configuration. // // Deletes a customer-managed key configuration object for an account. You // cannot delete a configuration that is associated with a running workspace. -func (a *EncryptionKeysPreviewAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error { - return a.encryptionKeysPreviewImpl.Delete(ctx, DeleteEncryptionKeyRequest{ +func (a *EncryptionKeysAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) error { + return a.encryptionKeysImpl.Delete(ctx, DeleteEncryptionKeyRequest{ CustomerManagedKeyId: customerManagedKeyId, }) } @@ -319,13 +319,13 @@ func (a *EncryptionKeysPreviewAPI) DeleteByCustomerManagedKeyId(ctx context.Cont // // This operation is available only if your account is on the E2 version of the // platform.", -func (a *EncryptionKeysPreviewAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error) { - return a.encryptionKeysPreviewImpl.Get(ctx, GetEncryptionKeyRequest{ +func (a *EncryptionKeysAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error) { + return a.encryptionKeysImpl.Get(ctx, GetEncryptionKeyRequest{ CustomerManagedKeyId: customerManagedKeyId, }) } -type NetworksPreviewInterface interface { +type NetworksInterface interface { // Create network configuration. // @@ -375,7 +375,7 @@ type NetworksPreviewInterface interface { // platform. List(ctx context.Context) ([]Network, error) - // NetworkNetworkNameToNetworkIdMap calls [NetworksPreviewAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. 
+ // NetworkNetworkNameToNetworkIdMap calls [NetworksAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. // // Returns an error if there's more than one [Network] with the same .NetworkName. // @@ -384,7 +384,7 @@ type NetworksPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error) - // GetByNetworkName calls [NetworksPreviewAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. + // GetByNetworkName calls [NetworksAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. // // Returns an error if there's more than one [Network] with the same .NetworkName. // @@ -394,9 +394,9 @@ type NetworksPreviewInterface interface { GetByNetworkName(ctx context.Context, name string) (*Network, error) } -func NewNetworksPreview(client *client.DatabricksClient) *NetworksPreviewAPI { - return &NetworksPreviewAPI{ - networksPreviewImpl: networksPreviewImpl{ +func NewNetworks(client *client.DatabricksClient) *NetworksAPI { + return &NetworksAPI{ + networksImpl: networksImpl{ client: client, }, } @@ -405,8 +405,8 @@ func NewNetworksPreview(client *client.DatabricksClient) *NetworksPreviewAPI { // These APIs manage network configurations for customer-managed VPCs // (optional). Its ID is used when creating a new workspace if you use // customer-managed VPCs. -type NetworksPreviewAPI struct { - networksPreviewImpl +type NetworksAPI struct { + networksImpl } // Delete a network configuration. @@ -417,8 +417,8 @@ type NetworksPreviewAPI struct { // // This operation is available only if your account is on the E2 version of the // platform. -func (a *NetworksPreviewAPI) DeleteByNetworkId(ctx context.Context, networkId string) error { - return a.networksPreviewImpl.Delete(ctx, DeleteNetworkRequest{ +func (a *NetworksAPI) DeleteByNetworkId(ctx context.Context, networkId string) error { + return a.networksImpl.Delete(ctx, DeleteNetworkRequest{ NetworkId: networkId, }) } @@ -427,20 +427,20 @@ func (a *NetworksPreviewAPI) DeleteByNetworkId(ctx context.Context, networkId st // // Gets a Databricks network configuration, which represents a cloud VPC and its // resources. -func (a *NetworksPreviewAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error) { - return a.networksPreviewImpl.Get(ctx, GetNetworkRequest{ +func (a *NetworksAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error) { + return a.networksImpl.Get(ctx, GetNetworkRequest{ NetworkId: networkId, }) } -// NetworkNetworkNameToNetworkIdMap calls [NetworksPreviewAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. +// NetworkNetworkNameToNetworkIdMap calls [NetworksAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value. // // Returns an error if there's more than one [Network] with the same .NetworkName. // // Note: All [Network] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
-func (a *NetworksPreviewAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error) { +func (a *NetworksAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -458,14 +458,14 @@ func (a *NetworksPreviewAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Contex return mapping, nil } -// GetByNetworkName calls [NetworksPreviewAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. +// GetByNetworkName calls [NetworksAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network]. // // Returns an error if there's more than one [Network] with the same .NetworkName. // // Note: All [Network] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *NetworksPreviewAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error) { +func (a *NetworksAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -486,7 +486,7 @@ func (a *NetworksPreviewAPI) GetByNetworkName(ctx context.Context, name string) return &alternatives[0], nil } -type PrivateAccessPreviewInterface interface { +type PrivateAccessInterface interface { // Create private access settings. // @@ -561,7 +561,7 @@ type PrivateAccessPreviewInterface interface { // by ID. List(ctx context.Context) ([]PrivateAccessSettings, error) - // PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessPreviewAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. + // PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. // // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. // @@ -570,7 +570,7 @@ type PrivateAccessPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error) - // GetByPrivateAccessSettingsName calls [PrivateAccessPreviewAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. + // GetByPrivateAccessSettingsName calls [PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. // // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. 
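The name-to-ID helpers above trade memory for convenience: List is drained once, the whole result set is held in memory, and map construction fails fast on a duplicate name. A sketch of the renamed Networks variant, under the same assumed import path and environment-based setup as the sketch above:

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed path
)

func printNetworkIds() {
	c, err := provisioningpreview.NewNetworksClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// One List call; every Network is kept in memory while the map is built.
	byName, err := c.NetworkNetworkNameToNetworkIdMap(context.Background())
	if err != nil {
		log.Fatal(err) // also covers the duplicate-name case
	}
	for name, id := range byName {
		log.Printf("%s -> %s", name, id)
	}
}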
// @@ -605,17 +605,17 @@ type PrivateAccessPreviewInterface interface { Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error } -func NewPrivateAccessPreview(client *client.DatabricksClient) *PrivateAccessPreviewAPI { - return &PrivateAccessPreviewAPI{ - privateAccessPreviewImpl: privateAccessPreviewImpl{ +func NewPrivateAccess(client *client.DatabricksClient) *PrivateAccessAPI { + return &PrivateAccessAPI{ + privateAccessImpl: privateAccessImpl{ client: client, }, } } // These APIs manage private access settings for this account. -type PrivateAccessPreviewAPI struct { - privateAccessPreviewImpl +type PrivateAccessAPI struct { + privateAccessImpl } // Delete a private access settings object. @@ -628,8 +628,8 @@ type PrivateAccessPreviewAPI struct { // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html -func (a *PrivateAccessPreviewAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error { - return a.privateAccessPreviewImpl.Delete(ctx, DeletePrivateAccesRequest{ +func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error { + return a.privateAccessImpl.Delete(ctx, DeletePrivateAccesRequest{ PrivateAccessSettingsId: privateAccessSettingsId, }) } @@ -644,20 +644,20 @@ func (a *PrivateAccessPreviewAPI) DeleteByPrivateAccessSettingsId(ctx context.Co // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html -func (a *PrivateAccessPreviewAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error) { - return a.privateAccessPreviewImpl.Get(ctx, GetPrivateAccesRequest{ +func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error) { + return a.privateAccessImpl.Get(ctx, GetPrivateAccesRequest{ PrivateAccessSettingsId: privateAccessSettingsId, }) } -// PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessPreviewAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. +// PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value. // // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. // // Note: All [PrivateAccessSettings] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
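Note that Replace reuses UpsertPrivateAccessSettingsRequest, the same payload type as Create; the impl below keys the request URL on PrivateAccessSettingsId. A hedged sketch (the other request fields, e.g. the settings name and region, are omitted here but would be required in a real call):

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed path
)

func replacePrivateAccess(id string) {
	c, err := provisioningpreview.NewPrivateAccessClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	// Replaces the existing settings object addressed by its ID in full.
	err = c.Replace(context.Background(), provisioningpreview.UpsertPrivateAccessSettingsRequest{
		PrivateAccessSettingsId: id,
		// ...remaining settings fields omitted for brevity...
	})
	if err != nil {
		log.Fatal(err)
	}
}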
-func (a *PrivateAccessPreviewAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error) { +func (a *PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -675,14 +675,14 @@ func (a *PrivateAccessPreviewAPI) PrivateAccessSettingsPrivateAccessSettingsName return mapping, nil } -// GetByPrivateAccessSettingsName calls [PrivateAccessPreviewAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. +// GetByPrivateAccessSettingsName calls [PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings]. // // Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName. // // Note: All [PrivateAccessSettings] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *PrivateAccessPreviewAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error) { +func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -703,7 +703,7 @@ func (a *PrivateAccessPreviewAPI) GetByPrivateAccessSettingsName(ctx context.Con return &alternatives[0], nil } -type StoragePreviewInterface interface { +type StorageInterface interface { // Create new storage configuration. // @@ -747,7 +747,7 @@ type StoragePreviewInterface interface { // specified by ID. List(ctx context.Context) ([]StorageConfiguration, error) - // StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StoragePreviewAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. + // StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StorageAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. // // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. // @@ -756,7 +756,7 @@ type StoragePreviewInterface interface { // This method is generated by Databricks SDK Code Generator. StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error) - // GetByStorageConfigurationName calls [StoragePreviewAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. + // GetByStorageConfigurationName calls [StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. // // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. 
// @@ -766,9 +766,9 @@ type StoragePreviewInterface interface { GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error) } -func NewStoragePreview(client *client.DatabricksClient) *StoragePreviewAPI { - return &StoragePreviewAPI{ - storagePreviewImpl: storagePreviewImpl{ +func NewStorage(client *client.DatabricksClient) *StorageAPI { + return &StorageAPI{ + storageImpl: storageImpl{ client: client, }, } @@ -780,16 +780,16 @@ func NewStoragePreview(client *client.DatabricksClient) *StoragePreviewAPI { // bucket for storage of non-production DBFS data. A storage configuration // encapsulates this bucket information, and its ID is used when creating a new // workspace. -type StoragePreviewAPI struct { - storagePreviewImpl +type StorageAPI struct { + storageImpl } // Delete storage configuration. // // Deletes a Databricks storage configuration. You cannot delete a storage // configuration that is associated with any workspace. -func (a *StoragePreviewAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error { - return a.storagePreviewImpl.Delete(ctx, DeleteStorageRequest{ +func (a *StorageAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) error { + return a.storageImpl.Delete(ctx, DeleteStorageRequest{ StorageConfigurationId: storageConfigurationId, }) } @@ -797,20 +797,20 @@ func (a *StoragePreviewAPI) DeleteByStorageConfigurationId(ctx context.Context, // Get storage configuration. // // Gets a Databricks storage configuration for an account, both specified by ID. -func (a *StoragePreviewAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error) { - return a.storagePreviewImpl.Get(ctx, GetStorageRequest{ +func (a *StorageAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error) { + return a.storageImpl.Get(ctx, GetStorageRequest{ StorageConfigurationId: storageConfigurationId, }) } -// StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StoragePreviewAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. +// StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StorageAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value. // // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. // // Note: All [StorageConfiguration] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *StoragePreviewAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error) { +func (a *StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -828,14 +828,14 @@ func (a *StoragePreviewAPI) StorageConfigurationStorageConfigurationNameToStorag return mapping, nil } -// GetByStorageConfigurationName calls [StoragePreviewAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. 
+// GetByStorageConfigurationName calls [StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration]. // // Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName. // // Note: All [StorageConfiguration] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *StoragePreviewAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error) { +func (a *StorageAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -856,7 +856,7 @@ func (a *StoragePreviewAPI) GetByStorageConfigurationName(ctx context.Context, n return &alternatives[0], nil } -type VpcEndpointsPreviewInterface interface { +type VpcEndpointsInterface interface { // Create VPC endpoint configuration. // @@ -930,7 +930,7 @@ type VpcEndpointsPreviewInterface interface { // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html List(ctx context.Context) ([]VpcEndpoint, error) - // VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsPreviewAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. + // VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. // // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. // @@ -939,7 +939,7 @@ type VpcEndpointsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error) - // GetByVpcEndpointName calls [VpcEndpointsPreviewAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. + // GetByVpcEndpointName calls [VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. // // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. // @@ -949,17 +949,17 @@ type VpcEndpointsPreviewInterface interface { GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error) } -func NewVpcEndpointsPreview(client *client.DatabricksClient) *VpcEndpointsPreviewAPI { - return &VpcEndpointsPreviewAPI{ - vpcEndpointsPreviewImpl: vpcEndpointsPreviewImpl{ +func NewVpcEndpoints(client *client.DatabricksClient) *VpcEndpointsAPI { + return &VpcEndpointsAPI{ + vpcEndpointsImpl: vpcEndpointsImpl{ client: client, }, } } // These APIs manage VPC endpoint configurations for this account. -type VpcEndpointsPreviewAPI struct { - vpcEndpointsPreviewImpl +type VpcEndpointsAPI struct { + vpcEndpointsImpl } // Delete VPC endpoint configuration. 
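The generated storage helpers compose naturally: resolve a configuration by name, then address it by ID. A sketch under the same assumptions as above (the configuration name is hypothetical):

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed path
)

func deleteStorageByName(ctx context.Context, name string) {
	c, err := provisioningpreview.NewStorageClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	sc, err := c.GetByStorageConfigurationName(ctx, name)
	if err != nil {
		log.Fatal(err)
	}
	// Fails if the configuration is still associated with any workspace.
	if err := c.DeleteByStorageConfigurationId(ctx, sc.StorageConfigurationId); err != nil {
		log.Fatal(err)
	}
}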
@@ -973,8 +973,8 @@ type VpcEndpointsPreviewAPI struct { // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html -func (a *VpcEndpointsPreviewAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error { - return a.vpcEndpointsPreviewImpl.Delete(ctx, DeleteVpcEndpointRequest{ +func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) error { + return a.vpcEndpointsImpl.Delete(ctx, DeleteVpcEndpointRequest{ VpcEndpointId: vpcEndpointId, }) } @@ -986,20 +986,20 @@ func (a *VpcEndpointsPreviewAPI) DeleteByVpcEndpointId(ctx context.Context, vpcE // // [AWS PrivateLink]: https://aws.amazon.com/privatelink // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html -func (a *VpcEndpointsPreviewAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error) { - return a.vpcEndpointsPreviewImpl.Get(ctx, GetVpcEndpointRequest{ +func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error) { + return a.vpcEndpointsImpl.Get(ctx, GetVpcEndpointRequest{ VpcEndpointId: vpcEndpointId, }) } -// VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsPreviewAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. +// VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value. // // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. // // Note: All [VpcEndpoint] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *VpcEndpointsPreviewAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error) { +func (a *VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -1017,14 +1017,14 @@ func (a *VpcEndpointsPreviewAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ct return mapping, nil } -// GetByVpcEndpointName calls [VpcEndpointsPreviewAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. +// GetByVpcEndpointName calls [VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint]. // // Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName. // // Note: All [VpcEndpoint] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
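GetByVpcEndpointName, like the other name lookups, is list-backed: it returns an error both when no endpoint has the given name and when more than one does. Sketch, same assumptions as above:

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed path
)

func lookupVpcEndpoint(ctx context.Context, name string) {
	c, err := provisioningpreview.NewVpcEndpointsClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	ep, err := c.GetByVpcEndpointName(ctx, name)
	if err != nil {
		// Covers both "none found" and "more than one with this name".
		log.Fatal(err)
	}
	log.Printf("endpoint id: %s", ep.VpcEndpointId)
}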
-func (a *VpcEndpointsPreviewAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error) { +func (a *VpcEndpointsAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -1045,7 +1045,7 @@ func (a *VpcEndpointsPreviewAPI) GetByVpcEndpointName(ctx context.Context, name return &alternatives[0], nil } -type WorkspacesPreviewInterface interface { +type WorkspacesInterface interface { // Create a new workspace. // @@ -1129,7 +1129,7 @@ type WorkspacesPreviewInterface interface { // account. List(ctx context.Context) ([]Workspace, error) - // WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesPreviewAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. + // WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. // // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. // @@ -1138,7 +1138,7 @@ type WorkspacesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error) - // GetByWorkspaceName calls [WorkspacesPreviewAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. + // GetByWorkspaceName calls [WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. // // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. // @@ -1272,9 +1272,9 @@ type WorkspacesPreviewInterface interface { Update(ctx context.Context, request UpdateWorkspaceRequest) error } -func NewWorkspacesPreview(client *client.DatabricksClient) *WorkspacesPreviewAPI { - return &WorkspacesPreviewAPI{ - workspacesPreviewImpl: workspacesPreviewImpl{ +func NewWorkspaces(client *client.DatabricksClient) *WorkspacesAPI { + return &WorkspacesAPI{ + workspacesImpl: workspacesImpl{ client: client, }, } @@ -1289,8 +1289,8 @@ func NewWorkspacesPreview(client *client.DatabricksClient) *WorkspacesPreviewAPI // These endpoints are available if your account is on the E2 version of the // platform or on a select custom plan that allows multiple workspaces per // account. -type WorkspacesPreviewAPI struct { - workspacesPreviewImpl +type WorkspacesAPI struct { + workspacesImpl } // Delete a workspace. @@ -1303,8 +1303,8 @@ type WorkspacesPreviewAPI struct { // This operation is available only if your account is on the E2 version of the // platform or on a select custom plan that allows multiple workspaces per // account. -func (a *WorkspacesPreviewAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error { - return a.workspacesPreviewImpl.Delete(ctx, DeleteWorkspaceRequest{ +func (a *WorkspacesAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) error { + return a.workspacesImpl.Delete(ctx, DeleteWorkspaceRequest{ WorkspaceId: workspaceId, }) } @@ -1325,20 +1325,20 @@ func (a *WorkspacesPreviewAPI) DeleteByWorkspaceId(ctx context.Context, workspac // account. 
// // [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html -func (a *WorkspacesPreviewAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error) { - return a.workspacesPreviewImpl.Get(ctx, GetWorkspaceRequest{ +func (a *WorkspacesAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error) { + return a.workspacesImpl.Get(ctx, GetWorkspaceRequest{ WorkspaceId: workspaceId, }) } -// WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesPreviewAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. +// WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value. // // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. // // Note: All [Workspace] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *WorkspacesPreviewAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error) { +func (a *WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]int64{} result, err := a.List(ctx) @@ -1356,14 +1356,14 @@ func (a *WorkspacesPreviewAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx contex return mapping, nil } -// GetByWorkspaceName calls [WorkspacesPreviewAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. +// GetByWorkspaceName calls [WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace]. // // Returns an error if there's more than one [Workspace] with the same .WorkspaceName. // // Note: All [Workspace] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
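One wrinkle worth noting: the workspace variant maps names to int64 IDs rather than strings, matching GetByWorkspaceId and DeleteByWorkspaceId. Sketch, same assumed setup:

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	provisioningpreview "github.com/databricks/databricks-sdk-go/provisioning/v2preview" // assumed path
)

func workspaceIdByName(ctx context.Context, name string) int64 {
	c, err := provisioningpreview.NewWorkspacesClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	byName, err := c.WorkspaceWorkspaceNameToWorkspaceIdMap(ctx)
	if err != nil {
		log.Fatal(err)
	}
	id, ok := byName[name]
	if !ok {
		log.Fatalf("workspace %q not found", name)
	}
	return id // usable with GetByWorkspaceId / DeleteByWorkspaceId
}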
-func (a *WorkspacesPreviewAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error) { +func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { diff --git a/provisioning/v2preview/client.go b/provisioning/v2preview/client.go index a70795a98..59ba387c2 100755 --- a/provisioning/v2preview/client.go +++ b/provisioning/v2preview/client.go @@ -9,13 +9,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/config" ) -type CredentialsPreviewClient struct { - CredentialsPreviewInterface +type CredentialsClient struct { + CredentialsInterface Config *config.Config } -func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, error) { +func NewCredentialsClient(cfg *config.Config) (*CredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -33,19 +33,19 @@ func NewCredentialsPreviewClient(cfg *config.Config) (*CredentialsPreviewClient, return nil, err } - return &CredentialsPreviewClient{ - Config: cfg, - CredentialsPreviewInterface: NewCredentialsPreview(apiClient), + return &CredentialsClient{ + Config: cfg, + CredentialsInterface: NewCredentials(apiClient), }, nil } -type EncryptionKeysPreviewClient struct { - EncryptionKeysPreviewInterface +type EncryptionKeysClient struct { + EncryptionKeysInterface Config *config.Config } -func NewEncryptionKeysPreviewClient(cfg *config.Config) (*EncryptionKeysPreviewClient, error) { +func NewEncryptionKeysClient(cfg *config.Config) (*EncryptionKeysClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -63,19 +63,19 @@ func NewEncryptionKeysPreviewClient(cfg *config.Config) (*EncryptionKeysPreviewC return nil, err } - return &EncryptionKeysPreviewClient{ - Config: cfg, - EncryptionKeysPreviewInterface: NewEncryptionKeysPreview(apiClient), + return &EncryptionKeysClient{ + Config: cfg, + EncryptionKeysInterface: NewEncryptionKeys(apiClient), }, nil } -type NetworksPreviewClient struct { - NetworksPreviewInterface +type NetworksClient struct { + NetworksInterface Config *config.Config } -func NewNetworksPreviewClient(cfg *config.Config) (*NetworksPreviewClient, error) { +func NewNetworksClient(cfg *config.Config) (*NetworksClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -93,19 +93,19 @@ func NewNetworksPreviewClient(cfg *config.Config) (*NetworksPreviewClient, error return nil, err } - return &NetworksPreviewClient{ - Config: cfg, - NetworksPreviewInterface: NewNetworksPreview(apiClient), + return &NetworksClient{ + Config: cfg, + NetworksInterface: NewNetworks(apiClient), }, nil } -type PrivateAccessPreviewClient struct { - PrivateAccessPreviewInterface +type PrivateAccessClient struct { + PrivateAccessInterface Config *config.Config } -func NewPrivateAccessPreviewClient(cfg *config.Config) (*PrivateAccessPreviewClient, error) { +func NewPrivateAccessClient(cfg *config.Config) (*PrivateAccessClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -123,19 +123,19 @@ func NewPrivateAccessPreviewClient(cfg *config.Config) (*PrivateAccessPreviewCli return nil, err } - return &PrivateAccessPreviewClient{ - Config: cfg, - PrivateAccessPreviewInterface: NewPrivateAccessPreview(apiClient), + return &PrivateAccessClient{ + Config: cfg, + PrivateAccessInterface: NewPrivateAccess(apiClient), }, nil } -type StoragePreviewClient struct { - StoragePreviewInterface +type StorageClient struct { + StorageInterface Config *config.Config } -func 
NewStoragePreviewClient(cfg *config.Config) (*StoragePreviewClient, error) { +func NewStorageClient(cfg *config.Config) (*StorageClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -153,19 +153,19 @@ func NewStoragePreviewClient(cfg *config.Config) (*StoragePreviewClient, error) return nil, err } - return &StoragePreviewClient{ - Config: cfg, - StoragePreviewInterface: NewStoragePreview(apiClient), + return &StorageClient{ + Config: cfg, + StorageInterface: NewStorage(apiClient), }, nil } -type VpcEndpointsPreviewClient struct { - VpcEndpointsPreviewInterface +type VpcEndpointsClient struct { + VpcEndpointsInterface Config *config.Config } -func NewVpcEndpointsPreviewClient(cfg *config.Config) (*VpcEndpointsPreviewClient, error) { +func NewVpcEndpointsClient(cfg *config.Config) (*VpcEndpointsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -183,19 +183,19 @@ func NewVpcEndpointsPreviewClient(cfg *config.Config) (*VpcEndpointsPreviewClien return nil, err } - return &VpcEndpointsPreviewClient{ - Config: cfg, - VpcEndpointsPreviewInterface: NewVpcEndpointsPreview(apiClient), + return &VpcEndpointsClient{ + Config: cfg, + VpcEndpointsInterface: NewVpcEndpoints(apiClient), }, nil } -type WorkspacesPreviewClient struct { - WorkspacesPreviewInterface +type WorkspacesClient struct { + WorkspacesInterface Config *config.Config } -func NewWorkspacesPreviewClient(cfg *config.Config) (*WorkspacesPreviewClient, error) { +func NewWorkspacesClient(cfg *config.Config) (*WorkspacesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -213,8 +213,8 @@ func NewWorkspacesPreviewClient(cfg *config.Config) (*WorkspacesPreviewClient, e return nil, err } - return &WorkspacesPreviewClient{ - Config: cfg, - WorkspacesPreviewInterface: NewWorkspacesPreview(apiClient), + return &WorkspacesClient{ + Config: cfg, + WorkspacesInterface: NewWorkspaces(apiClient), }, nil } diff --git a/provisioning/v2preview/impl.go b/provisioning/v2preview/impl.go index f8f9b1689..77e69e656 100755 --- a/provisioning/v2preview/impl.go +++ b/provisioning/v2preview/impl.go @@ -10,12 +10,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/client" ) -// unexported type that holds implementations of just CredentialsPreview API methods -type credentialsPreviewImpl struct { +// unexported type that holds implementations of just Credentials API methods +type credentialsImpl struct { client *client.DatabricksClient } -func (a *credentialsPreviewImpl) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) { +func (a *credentialsImpl) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) { var credential Credential path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -26,7 +26,7 @@ func (a *credentialsPreviewImpl) Create(ctx context.Context, request CreateCrede return &credential, err } -func (a *credentialsPreviewImpl) Delete(ctx context.Context, request DeleteCredentialRequest) error { +func (a *credentialsImpl) Delete(ctx context.Context, request DeleteCredentialRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) queryParams := make(map[string]any) @@ -36,7 +36,7 @@ func (a *credentialsPreviewImpl) Delete(ctx context.Context, request DeleteCrede return err } -func (a *credentialsPreviewImpl) Get(ctx context.Context, request GetCredentialRequest) 
(*Credential, error) { +func (a *credentialsImpl) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error) { var credential Credential path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) queryParams := make(map[string]any) @@ -46,7 +46,7 @@ func (a *credentialsPreviewImpl) Get(ctx context.Context, request GetCredentialR return &credential, err } -func (a *credentialsPreviewImpl) List(ctx context.Context) ([]Credential, error) { +func (a *credentialsImpl) List(ctx context.Context) ([]Credential, error) { var credentialList []Credential path := fmt.Sprintf("/api/2.0preview/accounts/%v/credentials", a.client.ConfiguredAccountID()) @@ -56,12 +56,12 @@ func (a *credentialsPreviewImpl) List(ctx context.Context) ([]Credential, error) return credentialList, err } -// unexported type that holds implementations of just EncryptionKeysPreview API methods -type encryptionKeysPreviewImpl struct { +// unexported type that holds implementations of just EncryptionKeys API methods +type encryptionKeysImpl struct { client *client.DatabricksClient } -func (a *encryptionKeysPreviewImpl) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) { +func (a *encryptionKeysImpl) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) { var customerManagedKey CustomerManagedKey path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -72,7 +72,7 @@ func (a *encryptionKeysPreviewImpl) Create(ctx context.Context, request CreateCu return &customerManagedKey, err } -func (a *encryptionKeysPreviewImpl) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error { +func (a *encryptionKeysImpl) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) queryParams := make(map[string]any) @@ -82,7 +82,7 @@ func (a *encryptionKeysPreviewImpl) Delete(ctx context.Context, request DeleteEn return err } -func (a *encryptionKeysPreviewImpl) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) { +func (a *encryptionKeysImpl) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) { var customerManagedKey CustomerManagedKey path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) queryParams := make(map[string]any) @@ -92,7 +92,7 @@ func (a *encryptionKeysPreviewImpl) Get(ctx context.Context, request GetEncrypti return &customerManagedKey, err } -func (a *encryptionKeysPreviewImpl) List(ctx context.Context) ([]CustomerManagedKey, error) { +func (a *encryptionKeysImpl) List(ctx context.Context) ([]CustomerManagedKey, error) { var customerManagedKeyList []CustomerManagedKey path := fmt.Sprintf("/api/2.0preview/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) @@ -102,12 +102,12 @@ func (a *encryptionKeysPreviewImpl) List(ctx context.Context) ([]CustomerManaged return customerManagedKeyList, err } -// unexported type that holds implementations of just NetworksPreview API methods -type networksPreviewImpl struct { +// unexported type that holds implementations of just Networks API methods +type networksImpl struct { client 
*client.DatabricksClient } -func (a *networksPreviewImpl) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) { +func (a *networksImpl) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) { var network Network path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -118,7 +118,7 @@ func (a *networksPreviewImpl) Create(ctx context.Context, request CreateNetworkR return &network, err } -func (a *networksPreviewImpl) Delete(ctx context.Context, request DeleteNetworkRequest) error { +func (a *networksImpl) Delete(ctx context.Context, request DeleteNetworkRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) queryParams := make(map[string]any) @@ -128,7 +128,7 @@ func (a *networksPreviewImpl) Delete(ctx context.Context, request DeleteNetworkR return err } -func (a *networksPreviewImpl) Get(ctx context.Context, request GetNetworkRequest) (*Network, error) { +func (a *networksImpl) Get(ctx context.Context, request GetNetworkRequest) (*Network, error) { var network Network path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) queryParams := make(map[string]any) @@ -138,7 +138,7 @@ func (a *networksPreviewImpl) Get(ctx context.Context, request GetNetworkRequest return &network, err } -func (a *networksPreviewImpl) List(ctx context.Context) ([]Network, error) { +func (a *networksImpl) List(ctx context.Context) ([]Network, error) { var networkList []Network path := fmt.Sprintf("/api/2.0preview/accounts/%v/networks", a.client.ConfiguredAccountID()) @@ -148,12 +148,12 @@ func (a *networksPreviewImpl) List(ctx context.Context) ([]Network, error) { return networkList, err } -// unexported type that holds implementations of just PrivateAccessPreview API methods -type privateAccessPreviewImpl struct { +// unexported type that holds implementations of just PrivateAccess API methods +type privateAccessImpl struct { client *client.DatabricksClient } -func (a *privateAccessPreviewImpl) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) { +func (a *privateAccessImpl) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) { var privateAccessSettings PrivateAccessSettings path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -164,7 +164,7 @@ func (a *privateAccessPreviewImpl) Create(ctx context.Context, request UpsertPri return &privateAccessSettings, err } -func (a *privateAccessPreviewImpl) Delete(ctx context.Context, request DeletePrivateAccesRequest) error { +func (a *privateAccessImpl) Delete(ctx context.Context, request DeletePrivateAccesRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) queryParams := make(map[string]any) @@ -174,7 +174,7 @@ func (a *privateAccessPreviewImpl) Delete(ctx context.Context, request DeletePri return err } -func (a *privateAccessPreviewImpl) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) { +func (a *privateAccessImpl) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) { var 
privateAccessSettings PrivateAccessSettings path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) queryParams := make(map[string]any) @@ -184,7 +184,7 @@ func (a *privateAccessPreviewImpl) Get(ctx context.Context, request GetPrivateAc return &privateAccessSettings, err } -func (a *privateAccessPreviewImpl) List(ctx context.Context) ([]PrivateAccessSettings, error) { +func (a *privateAccessImpl) List(ctx context.Context) ([]PrivateAccessSettings, error) { var privateAccessSettingsList []PrivateAccessSettings path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) @@ -194,7 +194,7 @@ func (a *privateAccessPreviewImpl) List(ctx context.Context) ([]PrivateAccessSet return privateAccessSettingsList, err } -func (a *privateAccessPreviewImpl) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error { +func (a *privateAccessImpl) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) queryParams := make(map[string]any) @@ -205,12 +205,12 @@ func (a *privateAccessPreviewImpl) Replace(ctx context.Context, request UpsertPr return err } -// unexported type that holds implementations of just StoragePreview API methods -type storagePreviewImpl struct { +// unexported type that holds implementations of just Storage API methods +type storageImpl struct { client *client.DatabricksClient } -func (a *storagePreviewImpl) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) { +func (a *storageImpl) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) { var storageConfiguration StorageConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -221,7 +221,7 @@ func (a *storagePreviewImpl) Create(ctx context.Context, request CreateStorageCo return &storageConfiguration, err } -func (a *storagePreviewImpl) Delete(ctx context.Context, request DeleteStorageRequest) error { +func (a *storageImpl) Delete(ctx context.Context, request DeleteStorageRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) queryParams := make(map[string]any) @@ -231,7 +231,7 @@ func (a *storagePreviewImpl) Delete(ctx context.Context, request DeleteStorageRe return err } -func (a *storagePreviewImpl) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) { +func (a *storageImpl) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) { var storageConfiguration StorageConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) queryParams := make(map[string]any) @@ -241,7 +241,7 @@ func (a *storagePreviewImpl) Get(ctx context.Context, request GetStorageRequest) return &storageConfiguration, err } -func (a *storagePreviewImpl) List(ctx context.Context) ([]StorageConfiguration, error) { +func (a *storageImpl) List(ctx context.Context) ([]StorageConfiguration, error) { var storageConfigurationList 
[]StorageConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) @@ -251,12 +251,12 @@ func (a *storagePreviewImpl) List(ctx context.Context) ([]StorageConfiguration, return storageConfigurationList, err } -// unexported type that holds implementations of just VpcEndpointsPreview API methods -type vpcEndpointsPreviewImpl struct { +// unexported type that holds implementations of just VpcEndpoints API methods +type vpcEndpointsImpl struct { client *client.DatabricksClient } -func (a *vpcEndpointsPreviewImpl) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) { +func (a *vpcEndpointsImpl) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) { var vpcEndpoint VpcEndpoint path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -267,7 +267,7 @@ func (a *vpcEndpointsPreviewImpl) Create(ctx context.Context, request CreateVpcE return &vpcEndpoint, err } -func (a *vpcEndpointsPreviewImpl) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error { +func (a *vpcEndpointsImpl) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) queryParams := make(map[string]any) @@ -277,7 +277,7 @@ func (a *vpcEndpointsPreviewImpl) Delete(ctx context.Context, request DeleteVpcE return err } -func (a *vpcEndpointsPreviewImpl) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) { +func (a *vpcEndpointsImpl) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) { var vpcEndpoint VpcEndpoint path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) queryParams := make(map[string]any) @@ -287,7 +287,7 @@ func (a *vpcEndpointsPreviewImpl) Get(ctx context.Context, request GetVpcEndpoin return &vpcEndpoint, err } -func (a *vpcEndpointsPreviewImpl) List(ctx context.Context) ([]VpcEndpoint, error) { +func (a *vpcEndpointsImpl) List(ctx context.Context) ([]VpcEndpoint, error) { var vpcEndpointList []VpcEndpoint path := fmt.Sprintf("/api/2.0preview/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) @@ -297,12 +297,12 @@ func (a *vpcEndpointsPreviewImpl) List(ctx context.Context) ([]VpcEndpoint, erro return vpcEndpointList, err } -// unexported type that holds implementations of just WorkspacesPreview API methods -type workspacesPreviewImpl struct { +// unexported type that holds implementations of just Workspaces API methods +type workspacesImpl struct { client *client.DatabricksClient } -func (a *workspacesPreviewImpl) Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error) { +func (a *workspacesImpl) Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error) { var workspace Workspace path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -313,7 +313,7 @@ func (a *workspacesPreviewImpl) Create(ctx context.Context, request CreateWorksp return &workspace, err } -func (a *workspacesPreviewImpl) Delete(ctx context.Context, request DeleteWorkspaceRequest) error { +func (a *workspacesImpl) Delete(ctx context.Context, request DeleteWorkspaceRequest) error { var deleteResponse DeleteResponse path := 
fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) @@ -323,7 +323,7 @@ func (a *workspacesPreviewImpl) Delete(ctx context.Context, request DeleteWorksp return err } -func (a *workspacesPreviewImpl) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error) { +func (a *workspacesImpl) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error) { var workspace Workspace path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) @@ -333,7 +333,7 @@ func (a *workspacesPreviewImpl) Get(ctx context.Context, request GetWorkspaceReq return &workspace, err } -func (a *workspacesPreviewImpl) List(ctx context.Context) ([]Workspace, error) { +func (a *workspacesImpl) List(ctx context.Context) ([]Workspace, error) { var workspaceList []Workspace path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces", a.client.ConfiguredAccountID()) @@ -343,7 +343,7 @@ func (a *workspacesPreviewImpl) List(ctx context.Context) ([]Workspace, error) { return workspaceList, err } -func (a *workspacesPreviewImpl) Update(ctx context.Context, request UpdateWorkspaceRequest) error { +func (a *workspacesImpl) Update(ctx context.Context, request UpdateWorkspaceRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) diff --git a/serving/v2preview/api.go b/serving/v2preview/api.go index cc9cc1f97..0dac2cf2b 100755 --- a/serving/v2preview/api.go +++ b/serving/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Serving Endpoints, Serving Endpoints Data Plane Preview, Serving Endpoints Preview, etc. +// These APIs allow you to manage Serving Endpoints, Serving Endpoints Data Plane, etc. package servingpreview import ( @@ -11,54 +11,6 @@ import ( ) type ServingEndpointsInterface interface { -} - -func NewServingEndpoints(client *client.DatabricksClient) *ServingEndpointsAPI { - return &ServingEndpointsAPI{ - servingEndpointsImpl: servingEndpointsImpl{ - client: client, - }, - } -} - -// The Serving Endpoints API allows you to create, update, and delete model -// serving endpoints. -// -// You can use a serving endpoint to serve models from the Databricks Model -// Registry or from Unity Catalog. Endpoints expose the underlying models as -// scalable REST API endpoints using serverless compute. This means the -// endpoints and associated compute resources are fully managed by Databricks -// and will not appear in your cloud account. A serving endpoint can consist of -// one or more MLflow models from the Databricks Model Registry, called served -// entities. A serving endpoint can have at most ten served entities. You can -// configure traffic settings to define how requests should be routed to your -// served entities behind an endpoint. Additionally, you can configure the scale -// of resources that should be applied to each served entity. -type ServingEndpointsAPI struct { - servingEndpointsImpl -} - -type ServingEndpointsDataPlanePreviewInterface interface { - - // Query a serving endpoint. 
- Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) -} - -func NewServingEndpointsDataPlanePreview(client *client.DatabricksClient) *ServingEndpointsDataPlanePreviewAPI { - return &ServingEndpointsDataPlanePreviewAPI{ - servingEndpointsDataPlanePreviewImpl: servingEndpointsDataPlanePreviewImpl{ - client: client, - }, - } -} - -// Serving endpoints DataPlane provides a set of operations to interact with -// data plane endpoints for Serving endpoints service. -type ServingEndpointsDataPlanePreviewAPI struct { - servingEndpointsDataPlanePreviewImpl -} - -type ServingEndpointsPreviewInterface interface { // Get build logs for a served model. // @@ -204,9 +156,9 @@ type ServingEndpointsPreviewInterface interface { UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) } -func NewServingEndpointsPreview(client *client.DatabricksClient) *ServingEndpointsPreviewAPI { - return &ServingEndpointsPreviewAPI{ - servingEndpointsPreviewImpl: servingEndpointsPreviewImpl{ +func NewServingEndpoints(client *client.DatabricksClient) *ServingEndpointsAPI { + return &ServingEndpointsAPI{ + servingEndpointsImpl: servingEndpointsImpl{ client: client, }, } @@ -225,23 +177,23 @@ func NewServingEndpointsPreview(client *client.DatabricksClient) *ServingEndpoin // configure traffic settings to define how requests should be routed to your // served entities behind an endpoint. Additionally, you can configure the scale // of resources that should be applied to each served entity. -type ServingEndpointsPreviewAPI struct { - servingEndpointsPreviewImpl +type ServingEndpointsAPI struct { + servingEndpointsImpl } // Get build logs for a served model. // // Retrieves the build logs associated with the provided served model. -func (a *ServingEndpointsPreviewAPI) BuildLogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*BuildLogsResponse, error) { - return a.servingEndpointsPreviewImpl.BuildLogs(ctx, BuildLogsRequest{ +func (a *ServingEndpointsAPI) BuildLogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*BuildLogsResponse, error) { + return a.servingEndpointsImpl.BuildLogs(ctx, BuildLogsRequest{ Name: name, ServedModelName: servedModelName, }) } // Delete a serving endpoint. -func (a *ServingEndpointsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.servingEndpointsPreviewImpl.Delete(ctx, DeleteServingEndpointRequest{ +func (a *ServingEndpointsAPI) DeleteByName(ctx context.Context, name string) error { + return a.servingEndpointsImpl.Delete(ctx, DeleteServingEndpointRequest{ Name: name, }) } @@ -250,8 +202,8 @@ func (a *ServingEndpointsPreviewAPI) DeleteByName(ctx context.Context, name stri // // Retrieves the metrics associated with the provided serving endpoint in either // Prometheus or OpenMetrics exposition format. -func (a *ServingEndpointsPreviewAPI) ExportMetricsByName(ctx context.Context, name string) (*ExportMetricsResponse, error) { - return a.servingEndpointsPreviewImpl.ExportMetrics(ctx, ExportMetricsRequest{ +func (a *ServingEndpointsAPI) ExportMetricsByName(ctx context.Context, name string) (*ExportMetricsResponse, error) { + return a.servingEndpointsImpl.ExportMetrics(ctx, ExportMetricsRequest{ Name: name, }) } @@ -259,8 +211,8 @@ func (a *ServingEndpointsPreviewAPI) ExportMetricsByName(ctx context.Context, na // Get a single serving endpoint. // // Retrieves the details for a single serving endpoint. 
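The serving clients are workspace-level (the constructor rejects account configs). A sketch of fetching endpoint details and build logs through the renamed API; the package name servingpreview comes from this patch, the import path is assumed from the directory layout, workspace auth is assumed to come from the environment, and the endpoint/model names are hypothetical:

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config"
	servingpreview "github.com/databricks/databricks-sdk-go/serving/v2preview" // assumed path
)

func inspectEndpoint(ctx context.Context) {
	c, err := servingpreview.NewServingEndpointsClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	ep, err := c.GetByName(ctx, "my-endpoint")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("endpoint: %s", ep.Name)
	// Build logs are addressed by endpoint name plus served-model name.
	if _, err := c.BuildLogsByNameAndServedModelName(ctx, "my-endpoint", "my-model"); err != nil {
		log.Fatal(err)
	}
}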
-func (a *ServingEndpointsPreviewAPI) GetByName(ctx context.Context, name string) (*ServingEndpointDetailed, error) { - return a.servingEndpointsPreviewImpl.Get(ctx, GetServingEndpointRequest{ +func (a *ServingEndpointsAPI) GetByName(ctx context.Context, name string) (*ServingEndpointDetailed, error) { + return a.servingEndpointsImpl.Get(ctx, GetServingEndpointRequest{ Name: name, }) } @@ -270,8 +222,8 @@ func (a *ServingEndpointsPreviewAPI) GetByName(ctx context.Context, name string) // Get the query schema of the serving endpoint in OpenAPI format. The schema // contains information for the supported paths, input and output format and // datatypes. -func (a *ServingEndpointsPreviewAPI) GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error) { - return a.servingEndpointsPreviewImpl.GetOpenApi(ctx, GetOpenApiRequest{ +func (a *ServingEndpointsAPI) GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error) { + return a.servingEndpointsImpl.GetOpenApi(ctx, GetOpenApiRequest{ Name: name, }) } @@ -279,8 +231,8 @@ func (a *ServingEndpointsPreviewAPI) GetOpenApiByName(ctx context.Context, name // Get serving endpoint permission levels. // // Gets the permission levels that a user can have on an object. -func (a *ServingEndpointsPreviewAPI) GetPermissionLevelsByServingEndpointId(ctx context.Context, servingEndpointId string) (*GetServingEndpointPermissionLevelsResponse, error) { - return a.servingEndpointsPreviewImpl.GetPermissionLevels(ctx, GetServingEndpointPermissionLevelsRequest{ +func (a *ServingEndpointsAPI) GetPermissionLevelsByServingEndpointId(ctx context.Context, servingEndpointId string) (*GetServingEndpointPermissionLevelsResponse, error) { + return a.servingEndpointsImpl.GetPermissionLevels(ctx, GetServingEndpointPermissionLevelsRequest{ ServingEndpointId: servingEndpointId, }) } @@ -289,8 +241,8 @@ func (a *ServingEndpointsPreviewAPI) GetPermissionLevelsByServingEndpointId(ctx // // Gets the permissions of a serving endpoint. Serving endpoints can inherit // permissions from their root object. -func (a *ServingEndpointsPreviewAPI) GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error) { - return a.servingEndpointsPreviewImpl.GetPermissions(ctx, GetServingEndpointPermissionsRequest{ +func (a *ServingEndpointsAPI) GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error) { + return a.servingEndpointsImpl.GetPermissions(ctx, GetServingEndpointPermissionsRequest{ ServingEndpointId: servingEndpointId, }) } @@ -298,9 +250,29 @@ func (a *ServingEndpointsPreviewAPI) GetPermissionsByServingEndpointId(ctx conte // Get the latest logs for a served model. // // Retrieves the service logs associated with the provided served model. -func (a *ServingEndpointsPreviewAPI) LogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*ServerLogsResponse, error) { - return a.servingEndpointsPreviewImpl.Logs(ctx, LogsRequest{ +func (a *ServingEndpointsAPI) LogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*ServerLogsResponse, error) { + return a.servingEndpointsImpl.Logs(ctx, LogsRequest{ Name: name, ServedModelName: servedModelName, }) } + +type ServingEndpointsDataPlaneInterface interface { + + // Query a serving endpoint. 
+ Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) +} + +func NewServingEndpointsDataPlane(client *client.DatabricksClient) *ServingEndpointsDataPlaneAPI { + return &ServingEndpointsDataPlaneAPI{ + servingEndpointsDataPlaneImpl: servingEndpointsDataPlaneImpl{ + client: client, + }, + } +} + +// Serving endpoints DataPlane provides a set of operations to interact with +// data plane endpoints for Serving endpoints service. +type ServingEndpointsDataPlaneAPI struct { + servingEndpointsDataPlaneImpl +} diff --git a/serving/v2preview/client.go b/serving/v2preview/client.go index e81c0c789..2147fbd19 100755 --- a/serving/v2preview/client.go +++ b/serving/v2preview/client.go @@ -44,13 +44,13 @@ func NewServingEndpointsClient(cfg *config.Config) (*ServingEndpointsClient, err }, nil } -type ServingEndpointsDataPlanePreviewClient struct { - ServingEndpointsDataPlanePreviewInterface +type ServingEndpointsDataPlaneClient struct { + ServingEndpointsDataPlaneInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewServingEndpointsDataPlanePreviewClient(cfg *config.Config) (*ServingEndpointsDataPlanePreviewClient, error) { +func NewServingEndpointsDataPlaneClient(cfg *config.Config) (*ServingEndpointsDataPlaneClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,43 +71,9 @@ func NewServingEndpointsDataPlanePreviewClient(cfg *config.Config) (*ServingEndp return nil, err } - return &ServingEndpointsDataPlanePreviewClient{ - Config: cfg, - apiClient: apiClient, - ServingEndpointsDataPlanePreviewInterface: NewServingEndpointsDataPlanePreview(databricksClient), - }, nil -} - -type ServingEndpointsPreviewClient struct { - ServingEndpointsPreviewInterface - Config *config.Config - apiClient *httpclient.ApiClient -} - -func NewServingEndpointsPreviewClient(cfg *config.Config) (*ServingEndpointsPreviewClient, error) { - if cfg == nil { - cfg = &config.Config{} - } - - err := cfg.EnsureResolved() - if err != nil { - return nil, err - } - if cfg.IsAccountClient() { - return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") - } - apiClient, err := cfg.NewApiClient() - if err != nil { - return nil, err - } - databricksClient, err := client.NewWithClient(cfg, apiClient) - if err != nil { - return nil, err - } - - return &ServingEndpointsPreviewClient{ - Config: cfg, - apiClient: apiClient, - ServingEndpointsPreviewInterface: NewServingEndpointsPreview(databricksClient), + return &ServingEndpointsDataPlaneClient{ + Config: cfg, + apiClient: apiClient, + ServingEndpointsDataPlaneInterface: NewServingEndpointsDataPlane(databricksClient), }, nil } diff --git a/serving/v2preview/impl.go b/serving/v2preview/impl.go index 120e26f72..736760489 100755 --- a/serving/v2preview/impl.go +++ b/serving/v2preview/impl.go @@ -17,28 +17,7 @@ type servingEndpointsImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just ServingEndpointsDataPlanePreview API methods -type servingEndpointsDataPlanePreviewImpl struct { - client *client.DatabricksClient -} - -func (a *servingEndpointsDataPlanePreviewImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { - var queryEndpointResponse QueryEndpointResponse - path := fmt.Sprintf("/api/preview//serving-endpoints/%v/invocations", request.Name) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - 
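The data-plane client follows the same construction pattern. A sketch of the renamed Query call, reusing the servingpreview import from the sketch above; QueryEndpointInput defines more fields in model.go than the Name shown here.

// Sketch only: assumes the servingpreview import from the previous example.
func queryEndpoint(ctx context.Context) (*servingpreview.QueryEndpointResponse, error) {
	dp, err := servingpreview.NewServingEndpointsDataPlaneClient(nil)
	if err != nil {
		return nil, err
	}
	// Name is the only field this diff shows being used; it becomes the path
	// parameter of POST .../serving-endpoints/{name}/invocations.
	return dp.Query(ctx, servingpreview.QueryEndpointInput{Name: "my-endpoint"})
}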
headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryEndpointResponse) - return &queryEndpointResponse, err -} - -// unexported type that holds implementations of just ServingEndpointsPreview API methods -type servingEndpointsPreviewImpl struct { - client *client.DatabricksClient -} - -func (a *servingEndpointsPreviewImpl) BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error) { +func (a *servingEndpointsImpl) BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error) { var buildLogsResponse BuildLogsResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/served-models/%v/build-logs", request.Name, request.ServedModelName) queryParams := make(map[string]any) @@ -48,7 +27,7 @@ func (a *servingEndpointsPreviewImpl) BuildLogs(ctx context.Context, request Bui return &buildLogsResponse, err } -func (a *servingEndpointsPreviewImpl) Create(ctx context.Context, request CreateServingEndpoint) (*ServingEndpointDetailed, error) { +func (a *servingEndpointsImpl) Create(ctx context.Context, request CreateServingEndpoint) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := "/api/2.0preview/serving-endpoints" queryParams := make(map[string]any) @@ -59,7 +38,7 @@ func (a *servingEndpointsPreviewImpl) Create(ctx context.Context, request Create return &servingEndpointDetailed, err } -func (a *servingEndpointsPreviewImpl) Delete(ctx context.Context, request DeleteServingEndpointRequest) error { +func (a *servingEndpointsImpl) Delete(ctx context.Context, request DeleteServingEndpointRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v", request.Name) queryParams := make(map[string]any) @@ -69,7 +48,7 @@ func (a *servingEndpointsPreviewImpl) Delete(ctx context.Context, request Delete return err } -func (a *servingEndpointsPreviewImpl) ExportMetrics(ctx context.Context, request ExportMetricsRequest) (*ExportMetricsResponse, error) { +func (a *servingEndpointsImpl) ExportMetrics(ctx context.Context, request ExportMetricsRequest) (*ExportMetricsResponse, error) { var exportMetricsResponse ExportMetricsResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/metrics", request.Name) queryParams := make(map[string]any) @@ -79,7 +58,7 @@ func (a *servingEndpointsPreviewImpl) ExportMetrics(ctx context.Context, request return &exportMetricsResponse, err } -func (a *servingEndpointsPreviewImpl) Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error) { +func (a *servingEndpointsImpl) Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v", request.Name) queryParams := make(map[string]any) @@ -89,7 +68,7 @@ func (a *servingEndpointsPreviewImpl) Get(ctx context.Context, request GetServin return &servingEndpointDetailed, err } -func (a *servingEndpointsPreviewImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) { +func (a *servingEndpointsImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) { var getOpenApiResponse GetOpenApiResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/openapi", request.Name) queryParams := make(map[string]any) @@ -99,7 +78,7 @@ func (a *servingEndpointsPreviewImpl) 
GetOpenApi(ctx context.Context, request Ge return &getOpenApiResponse, err } -func (a *servingEndpointsPreviewImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { +func (a *servingEndpointsImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { var getServingEndpointPermissionLevelsResponse GetServingEndpointPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v/permissionLevels", request.ServingEndpointId) queryParams := make(map[string]any) @@ -109,7 +88,7 @@ func (a *servingEndpointsPreviewImpl) GetPermissionLevels(ctx context.Context, r return &getServingEndpointPermissionLevelsResponse, err } -func (a *servingEndpointsPreviewImpl) GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { +func (a *servingEndpointsImpl) GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) queryParams := make(map[string]any) @@ -119,7 +98,7 @@ func (a *servingEndpointsPreviewImpl) GetPermissions(ctx context.Context, reques return &servingEndpointPermissions, err } -func (a *servingEndpointsPreviewImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) { +func (a *servingEndpointsImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) { var httpRequestResponse HttpRequestResponse path := "/api/2.0preview/external-function" queryParams := make(map[string]any) @@ -131,7 +110,7 @@ func (a *servingEndpointsPreviewImpl) HttpRequest(ctx context.Context, request E } // Get all serving endpoints. -func (a *servingEndpointsPreviewImpl) List(ctx context.Context) listing.Iterator[ServingEndpoint] { +func (a *servingEndpointsImpl) List(ctx context.Context) listing.Iterator[ServingEndpoint] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListEndpointsResponse, error) { @@ -151,11 +130,11 @@ func (a *servingEndpointsPreviewImpl) List(ctx context.Context) listing.Iterator } // Get all serving endpoints. 
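List (implemented just below) returns a lazy listing.Iterator[ServingEndpoint], and ListAll drains it with listing.ToSlice. A sketch of the eager path, reusing the client and imports from the first sketch:

// Sketch only: w is the *ServingEndpointsClient from the first example.
func printEndpoints(ctx context.Context, w *servingpreview.ServingEndpointsClient) error {
	// ListAll collects every page of List into a slice via listing.ToSlice.
	endpoints, err := w.ListAll(ctx)
	if err != nil {
		return err
	}
	for _, ep := range endpoints {
		fmt.Printf("%+v\n", ep) // ServingEndpoint fields are defined in model.go
	}
	return nil
}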
-func (a *servingEndpointsPreviewImpl) ListAll(ctx context.Context) ([]ServingEndpoint, error) { +func (a *servingEndpointsImpl) ListAll(ctx context.Context) ([]ServingEndpoint, error) { iterator := a.List(ctx) return listing.ToSlice[ServingEndpoint](ctx, iterator) } -func (a *servingEndpointsPreviewImpl) internalList(ctx context.Context) (*ListEndpointsResponse, error) { +func (a *servingEndpointsImpl) internalList(ctx context.Context) (*ListEndpointsResponse, error) { var listEndpointsResponse ListEndpointsResponse path := "/api/2.0preview/serving-endpoints" @@ -165,7 +144,7 @@ func (a *servingEndpointsPreviewImpl) internalList(ctx context.Context) (*ListEn return &listEndpointsResponse, err } -func (a *servingEndpointsPreviewImpl) Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error) { +func (a *servingEndpointsImpl) Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error) { var serverLogsResponse ServerLogsResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/served-models/%v/logs", request.Name, request.ServedModelName) queryParams := make(map[string]any) @@ -175,7 +154,7 @@ func (a *servingEndpointsPreviewImpl) Logs(ctx context.Context, request LogsRequ return &serverLogsResponse, err } -func (a *servingEndpointsPreviewImpl) Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) { +func (a *servingEndpointsImpl) Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) { var endpointTags EndpointTags path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/tags", request.Name) queryParams := make(map[string]any) @@ -186,7 +165,7 @@ func (a *servingEndpointsPreviewImpl) Patch(ctx context.Context, request PatchSe return &endpointTags, err } -func (a *servingEndpointsPreviewImpl) Put(ctx context.Context, request PutRequest) (*PutResponse, error) { +func (a *servingEndpointsImpl) Put(ctx context.Context, request PutRequest) (*PutResponse, error) { var putResponse PutResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/rate-limits", request.Name) queryParams := make(map[string]any) @@ -197,7 +176,7 @@ func (a *servingEndpointsPreviewImpl) Put(ctx context.Context, request PutReques return &putResponse, err } -func (a *servingEndpointsPreviewImpl) PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) { +func (a *servingEndpointsImpl) PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) { var putAiGatewayResponse PutAiGatewayResponse path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/ai-gateway", request.Name) queryParams := make(map[string]any) @@ -208,7 +187,7 @@ func (a *servingEndpointsPreviewImpl) PutAiGateway(ctx context.Context, request return &putAiGatewayResponse, err } -func (a *servingEndpointsPreviewImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { +func (a *servingEndpointsImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { var queryEndpointResponse QueryEndpointResponse path := fmt.Sprintf("/api/preview//serving-endpoints/%v/invocations", request.Name) queryParams := make(map[string]any) @@ -219,7 +198,7 @@ func (a *servingEndpointsPreviewImpl) Query(ctx context.Context, request QueryEn return &queryEndpointResponse, err } -func (a *servingEndpointsPreviewImpl) SetPermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { +func 
(a *servingEndpointsImpl) SetPermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) queryParams := make(map[string]any) @@ -230,7 +209,7 @@ func (a *servingEndpointsPreviewImpl) SetPermissions(ctx context.Context, reques return &servingEndpointPermissions, err } -func (a *servingEndpointsPreviewImpl) UpdateConfig(ctx context.Context, request EndpointCoreConfigInput) (*ServingEndpointDetailed, error) { +func (a *servingEndpointsImpl) UpdateConfig(ctx context.Context, request EndpointCoreConfigInput) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := fmt.Sprintf("/api/2.0preview/serving-endpoints/%v/config", request.Name) queryParams := make(map[string]any) @@ -241,7 +220,7 @@ func (a *servingEndpointsPreviewImpl) UpdateConfig(ctx context.Context, request return &servingEndpointDetailed, err } -func (a *servingEndpointsPreviewImpl) UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { +func (a *servingEndpointsImpl) UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0preview/permissions/serving-endpoints/%v", request.ServingEndpointId) queryParams := make(map[string]any) @@ -251,3 +230,19 @@ func (a *servingEndpointsPreviewImpl) UpdatePermissions(ctx context.Context, req err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &servingEndpointPermissions) return &servingEndpointPermissions, err } + +// unexported type that holds implementations of just ServingEndpointsDataPlane API methods +type servingEndpointsDataPlaneImpl struct { + client *client.DatabricksClient +} + +func (a *servingEndpointsDataPlaneImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { + var queryEndpointResponse QueryEndpointResponse + path := fmt.Sprintf("/api/preview/serving-endpoints/%v/invocations", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryEndpointResponse) + return &queryEndpointResponse, err +} diff --git a/settings/v2preview/api.go b/settings/v2preview/api.go index 7959cf4e6..88f25ac0a 100755 --- a/settings/v2preview/api.go +++ b/settings/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
-// These APIs allow you to manage Account Ip Access Lists Preview, Account Settings, Account Settings Preview, Aibi Dashboard Embedding Access Policy Preview, Aibi Dashboard Embedding Approved Domains Preview, Automatic Cluster Update Preview, Compliance Security Profile Preview, Credentials Manager Preview, Csp Enablement Account Preview, Default Namespace Preview, Disable Legacy Access Preview, Disable Legacy Dbfs Preview, Disable Legacy Features Preview, Enable Ip Access Lists Preview, Enhanced Security Monitoring Preview, Esm Enablement Account Preview, Ip Access Lists Preview, Network Connectivity Preview, Notification Destinations Preview, Personal Compute Preview, Restrict Workspace Admins Preview, Settings, Settings Preview, Token Management Preview, Tokens Preview, Workspace Conf Preview, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Ip Access Lists, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settingspreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type AccountIpAccessListsPreviewInterface interface { +type AccountIpAccessListsInterface interface { // Create access list. // @@ -66,7 +66,7 @@ type AccountIpAccessListsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]IpAccessListInfo, error) - // IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. + // IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // @@ -75,7 +75,7 @@ type AccountIpAccessListsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) - // GetByLabel calls [AccountIpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. + // GetByLabel calls [AccountIpAccessListsAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. 
// @@ -118,9 +118,9 @@ type AccountIpAccessListsPreviewInterface interface { Update(ctx context.Context, request UpdateIpAccessList) error } -func NewAccountIpAccessListsPreview(client *client.DatabricksClient) *AccountIpAccessListsPreviewAPI { - return &AccountIpAccessListsPreviewAPI{ - accountIpAccessListsPreviewImpl: accountIpAccessListsPreviewImpl{ +func NewAccountIpAccessLists(client *client.DatabricksClient) *AccountIpAccessListsAPI { + return &AccountIpAccessListsAPI{ + accountIpAccessListsImpl: accountIpAccessListsImpl{ client: client, }, } @@ -148,15 +148,15 @@ func NewAccountIpAccessListsPreview(client *client.DatabricksClient) *AccountIpA // // After changes to the account-level IP access lists, it can take a few minutes // for changes to take effect. -type AccountIpAccessListsPreviewAPI struct { - accountIpAccessListsPreviewImpl +type AccountIpAccessListsAPI struct { + accountIpAccessListsImpl } // Delete access list. // // Deletes an IP access list, specified by its list ID. -func (a *AccountIpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { - return a.accountIpAccessListsPreviewImpl.Delete(ctx, DeleteAccountIpAccessListRequest{ +func (a *AccountIpAccessListsAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { + return a.accountIpAccessListsImpl.Delete(ctx, DeleteAccountIpAccessListRequest{ IpAccessListId: ipAccessListId, }) } @@ -164,20 +164,20 @@ func (a *AccountIpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Cont // Get IP access list. // // Gets an IP access list, specified by its list ID. -func (a *AccountIpAccessListsPreviewAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*GetIpAccessListResponse, error) { - return a.accountIpAccessListsPreviewImpl.Get(ctx, GetAccountIpAccessListRequest{ +func (a *AccountIpAccessListsAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*GetIpAccessListResponse, error) { + return a.accountIpAccessListsImpl.Get(ctx, GetAccountIpAccessListRequest{ IpAccessListId: ipAccessListId, }) } -// IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. +// IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // // Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountIpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { +func (a *AccountIpAccessListsAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx) @@ -195,14 +195,14 @@ func (a *AccountIpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx co return mapping, nil } -// GetByLabel calls [AccountIpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. +// GetByLabel calls [AccountIpAccessListsAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. 
// // Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *AccountIpAccessListsPreviewAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { +func (a *AccountIpAccessListsAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -239,23 +239,7 @@ type AccountSettingsAPI struct { accountSettingsImpl } -type AccountSettingsPreviewInterface interface { -} - -func NewAccountSettingsPreview(client *client.DatabricksClient) *AccountSettingsPreviewAPI { - return &AccountSettingsPreviewAPI{ - accountSettingsPreviewImpl: accountSettingsPreviewImpl{ - client: client, - }, - } -} - -// Accounts Settings API allows users to manage settings at the account level. -type AccountSettingsPreviewAPI struct { - accountSettingsPreviewImpl -} - -type AibiDashboardEmbeddingAccessPolicyPreviewInterface interface { +type AibiDashboardEmbeddingAccessPolicyInterface interface { // Delete the AI/BI dashboard embedding access policy. // @@ -276,9 +260,9 @@ type AibiDashboardEmbeddingAccessPolicyPreviewInterface interface { Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) } -func NewAibiDashboardEmbeddingAccessPolicyPreview(client *client.DatabricksClient) *AibiDashboardEmbeddingAccessPolicyPreviewAPI { - return &AibiDashboardEmbeddingAccessPolicyPreviewAPI{ - aibiDashboardEmbeddingAccessPolicyPreviewImpl: aibiDashboardEmbeddingAccessPolicyPreviewImpl{ +func NewAibiDashboardEmbeddingAccessPolicy(client *client.DatabricksClient) *AibiDashboardEmbeddingAccessPolicyAPI { + return &AibiDashboardEmbeddingAccessPolicyAPI{ + aibiDashboardEmbeddingAccessPolicyImpl: aibiDashboardEmbeddingAccessPolicyImpl{ client: client, }, } @@ -287,11 +271,11 @@ func NewAibiDashboardEmbeddingAccessPolicyPreview(client *client.DatabricksClien // Controls whether AI/BI published dashboard embedding is enabled, // conditionally enabled, or disabled at the workspace level. By default, this // setting is conditionally enabled (ALLOW_APPROVED_DOMAINS). -type AibiDashboardEmbeddingAccessPolicyPreviewAPI struct { - aibiDashboardEmbeddingAccessPolicyPreviewImpl +type AibiDashboardEmbeddingAccessPolicyAPI struct { + aibiDashboardEmbeddingAccessPolicyImpl } -type AibiDashboardEmbeddingApprovedDomainsPreviewInterface interface { +type AibiDashboardEmbeddingApprovedDomainsInterface interface { // Delete AI/BI dashboard embedding approved domains. 
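Because these account-level New* constructors take a *client.DatabricksClient rather than a *config.Config, callers build the client chain themselves. A sketch, assuming account-level credentials in the environment; the config/client import paths are a guess inferred from the databricks/useragent import visible in this file, and the label is hypothetical.

package main

import (
	"context"
	"fmt"
	"log"

	// Import paths below are assumptions inferred from this patch's layout.
	"github.com/databricks/databricks-sdk-go/databricks/client"
	"github.com/databricks/databricks-sdk-go/databricks/config"
	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview"
)

func main() {
	ctx := context.Background()
	// Same construction chain the generated clients use internally.
	cfg := &config.Config{}
	if err := cfg.EnsureResolved(); err != nil {
		log.Fatal(err)
	}
	apiClient, err := cfg.NewApiClient()
	if err != nil {
		log.Fatal(err)
	}
	databricksClient, err := client.NewWithClient(cfg, apiClient)
	if err != nil {
		log.Fatal(err)
	}
	acl := settingspreview.NewAccountIpAccessLists(databricksClient)
	// GetByLabel post-filters ListAll in memory and errors when two lists
	// share a label, per the generated doc comments above.
	info, err := acl.GetByLabel(ctx, "corp-vpn") // label is hypothetical
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.ListId)
}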
// @@ -312,9 +296,9 @@ type AibiDashboardEmbeddingApprovedDomainsPreviewInterface interface { Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) } -func NewAibiDashboardEmbeddingApprovedDomainsPreview(client *client.DatabricksClient) *AibiDashboardEmbeddingApprovedDomainsPreviewAPI { - return &AibiDashboardEmbeddingApprovedDomainsPreviewAPI{ - aibiDashboardEmbeddingApprovedDomainsPreviewImpl: aibiDashboardEmbeddingApprovedDomainsPreviewImpl{ +func NewAibiDashboardEmbeddingApprovedDomains(client *client.DatabricksClient) *AibiDashboardEmbeddingApprovedDomainsAPI { + return &AibiDashboardEmbeddingApprovedDomainsAPI{ + aibiDashboardEmbeddingApprovedDomainsImpl: aibiDashboardEmbeddingApprovedDomainsImpl{ client: client, }, } @@ -323,11 +307,11 @@ func NewAibiDashboardEmbeddingApprovedDomainsPreview(client *client.DatabricksCl // Controls the list of domains approved to host the embedded AI/BI dashboards. // The approved domains list can't be mutated when the current access policy is // not set to ALLOW_APPROVED_DOMAINS. -type AibiDashboardEmbeddingApprovedDomainsPreviewAPI struct { - aibiDashboardEmbeddingApprovedDomainsPreviewImpl +type AibiDashboardEmbeddingApprovedDomainsAPI struct { + aibiDashboardEmbeddingApprovedDomainsImpl } -type AutomaticClusterUpdatePreviewInterface interface { +type AutomaticClusterUpdateInterface interface { // Get the automatic cluster update setting. // @@ -344,9 +328,9 @@ type AutomaticClusterUpdatePreviewInterface interface { Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) } -func NewAutomaticClusterUpdatePreview(client *client.DatabricksClient) *AutomaticClusterUpdatePreviewAPI { - return &AutomaticClusterUpdatePreviewAPI{ - automaticClusterUpdatePreviewImpl: automaticClusterUpdatePreviewImpl{ +func NewAutomaticClusterUpdate(client *client.DatabricksClient) *AutomaticClusterUpdateAPI { + return &AutomaticClusterUpdateAPI{ + automaticClusterUpdateImpl: automaticClusterUpdateImpl{ client: client, }, } @@ -354,11 +338,11 @@ func NewAutomaticClusterUpdatePreview(client *client.DatabricksClient) *Automati // Controls whether automatic cluster update is enabled for the current // workspace. By default, it is turned off. -type AutomaticClusterUpdatePreviewAPI struct { - automaticClusterUpdatePreviewImpl +type AutomaticClusterUpdateAPI struct { + automaticClusterUpdateImpl } -type ComplianceSecurityProfilePreviewInterface interface { +type ComplianceSecurityProfileInterface interface { // Get the compliance security profile setting. // @@ -375,9 +359,9 @@ type ComplianceSecurityProfilePreviewInterface interface { Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) } -func NewComplianceSecurityProfilePreview(client *client.DatabricksClient) *ComplianceSecurityProfilePreviewAPI { - return &ComplianceSecurityProfilePreviewAPI{ - complianceSecurityProfilePreviewImpl: complianceSecurityProfilePreviewImpl{ +func NewComplianceSecurityProfile(client *client.DatabricksClient) *ComplianceSecurityProfileAPI { + return &ComplianceSecurityProfileAPI{ + complianceSecurityProfileImpl: complianceSecurityProfileImpl{ client: client, }, } @@ -388,11 +372,11 @@ func NewComplianceSecurityProfilePreview(client *client.DatabricksClient) *Compl // off. // // This setting can NOT be disabled once it is enabled.
-type ComplianceSecurityProfilePreviewAPI struct { - complianceSecurityProfilePreviewImpl +type ComplianceSecurityProfileAPI struct { + complianceSecurityProfileImpl } -type CredentialsManagerPreviewInterface interface { +type CredentialsManagerInterface interface { // Exchange token. // @@ -401,9 +385,9 @@ type CredentialsManagerPreviewInterface interface { ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) } -func NewCredentialsManagerPreview(client *client.DatabricksClient) *CredentialsManagerPreviewAPI { - return &CredentialsManagerPreviewAPI{ - credentialsManagerPreviewImpl: credentialsManagerPreviewImpl{ +func NewCredentialsManager(client *client.DatabricksClient) *CredentialsManagerAPI { + return &CredentialsManagerAPI{ + credentialsManagerImpl: credentialsManagerImpl{ client: client, }, } @@ -411,11 +395,11 @@ func NewCredentialsManagerPreview(client *client.DatabricksClient) *CredentialsM // Credentials manager interacts with Identity Providers to perform // token exchanges using stored credentials and refresh tokens. -type CredentialsManagerPreviewAPI struct { - credentialsManagerPreviewImpl +type CredentialsManagerAPI struct { + credentialsManagerImpl } -type CspEnablementAccountPreviewInterface interface { +type CspEnablementAccountInterface interface { // Get the compliance security profile setting for new workspaces. // @@ -429,9 +413,9 @@ type CspEnablementAccountPreviewInterface interface { Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) } -func NewCspEnablementAccountPreview(client *client.DatabricksClient) *CspEnablementAccountPreviewAPI { - return &CspEnablementAccountPreviewAPI{ - cspEnablementAccountPreviewImpl: cspEnablementAccountPreviewImpl{ +func NewCspEnablementAccount(client *client.DatabricksClient) *CspEnablementAccountAPI { + return &CspEnablementAccountAPI{ + cspEnablementAccountImpl: cspEnablementAccountImpl{ client: client, }, } @@ -444,11 +428,11 @@ func NewCspEnablementAccountPreview(client *client.DatabricksClient) *CspEnablem // // This setting can be disabled so that new workspaces do not have compliance // security profile enabled by default. -type CspEnablementAccountPreviewAPI struct { - cspEnablementAccountPreviewImpl +type CspEnablementAccountAPI struct { + cspEnablementAccountImpl } -type DefaultNamespacePreviewInterface interface { +type DefaultNamespaceInterface interface { // Delete the default namespace setting. // @@ -476,9 +460,9 @@ type DefaultNamespacePreviewInterface interface { Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) } -func NewDefaultNamespacePreview(client *client.DatabricksClient) *DefaultNamespacePreviewAPI { - return &DefaultNamespacePreviewAPI{ - defaultNamespacePreviewImpl: defaultNamespacePreviewImpl{ +func NewDefaultNamespace(client *client.DatabricksClient) *DefaultNamespaceAPI { + return &DefaultNamespaceAPI{ + defaultNamespaceImpl: defaultNamespaceImpl{ client: client, }, } @@ -496,11 +480,11 @@ func NewDefaultNamespacePreview(client *client.DatabricksClient) *DefaultNamespa // This setting requires a restart of clusters and SQL warehouses to take // effect. Additionally, the default namespace only applies when using Unity // Catalog-enabled compute.
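A sketch of the renamed DefaultNamespace API, building on the client chain from the earlier sketch. UpdateDefaultNamespaceSettingRequest's fields live in model.go and are not shown in this hunk, so the request literal is left empty here.

// Sketch only: databricksClient comes from the construction chain above.
func updateDefaultNamespace(ctx context.Context, databricksClient *client.DatabricksClient) error {
	ns := settingspreview.NewDefaultNamespace(databricksClient)
	// Populate the request (new setting value, field mask, etc.) per
	// model.go; those fields are not visible in this hunk.
	setting, err := ns.Update(ctx, settingspreview.UpdateDefaultNamespaceSettingRequest{})
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", setting)
	return nil
}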
-type DefaultNamespacePreviewAPI struct { - defaultNamespacePreviewImpl +type DefaultNamespaceAPI struct { + defaultNamespaceImpl } -type DisableLegacyAccessPreviewInterface interface { +type DisableLegacyAccessInterface interface { // Delete Legacy Access Disablement Status. // @@ -518,9 +502,9 @@ type DisableLegacyAccessPreviewInterface interface { Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) } -func NewDisableLegacyAccessPreview(client *client.DatabricksClient) *DisableLegacyAccessPreviewAPI { - return &DisableLegacyAccessPreviewAPI{ - disableLegacyAccessPreviewImpl: disableLegacyAccessPreviewImpl{ +func NewDisableLegacyAccess(client *client.DatabricksClient) *DisableLegacyAccessAPI { + return &DisableLegacyAccessAPI{ + disableLegacyAccessImpl: disableLegacyAccessImpl{ client: client, }, } @@ -533,11 +517,11 @@ func NewDisableLegacyAccessPreview(client *client.DatabricksClient) *DisableLega // link) on any External Location access from the workspace. 3. Alters DBFS path // access to use External Location permissions in place of legacy credentials. // 4. Enforces Unity Catalog access on all path based access. -type DisableLegacyAccessPreviewAPI struct { - disableLegacyAccessPreviewImpl +type DisableLegacyAccessAPI struct { + disableLegacyAccessImpl } -type DisableLegacyDbfsPreviewInterface interface { +type DisableLegacyDbfsInterface interface { // Delete the disable legacy DBFS setting. // @@ -556,9 +540,9 @@ type DisableLegacyDbfsPreviewInterface interface { Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) } -func NewDisableLegacyDbfsPreview(client *client.DatabricksClient) *DisableLegacyDbfsPreviewAPI { - return &DisableLegacyDbfsPreviewAPI{ - disableLegacyDbfsPreviewImpl: disableLegacyDbfsPreviewImpl{ +func NewDisableLegacyDbfs(client *client.DatabricksClient) *DisableLegacyDbfsAPI { + return &DisableLegacyDbfsAPI{ + disableLegacyDbfsImpl: disableLegacyDbfsImpl{ client: client, }, } @@ -567,11 +551,11 @@ func NewDisableLegacyDbfsPreview(client *client.DatabricksClient) *DisableLegacy // When this setting is on, access to DBFS root and DBFS mounts is disallowed // (as well as creation of new mounts). When the setting is off, all DBFS // functionality is enabled -type DisableLegacyDbfsPreviewAPI struct { - disableLegacyDbfsPreviewImpl +type DisableLegacyDbfsAPI struct { + disableLegacyDbfsImpl } -type DisableLegacyFeaturesPreviewInterface interface { +type DisableLegacyFeaturesInterface interface { // Delete the disable legacy features setting. // @@ -589,9 +573,9 @@ type DisableLegacyFeaturesPreviewInterface interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } -func NewDisableLegacyFeaturesPreview(client *client.DatabricksClient) *DisableLegacyFeaturesPreviewAPI { - return &DisableLegacyFeaturesPreviewAPI{ - disableLegacyFeaturesPreviewImpl: disableLegacyFeaturesPreviewImpl{ +func NewDisableLegacyFeatures(client *client.DatabricksClient) *DisableLegacyFeaturesAPI { + return &DisableLegacyFeaturesAPI{ + disableLegacyFeaturesImpl: disableLegacyFeaturesImpl{ client: client, }, } @@ -603,11 +587,11 @@ func NewDisableLegacyFeaturesPreview(client *client.DatabricksClient) *DisableLe // Hive Metastore will not be provisioned. 3. Disables the use of // ‘No-isolation clusters’. 4. Disables Databricks Runtime versions prior to // 13.3LTS. 
-type DisableLegacyFeaturesPreviewAPI struct { - disableLegacyFeaturesPreviewImpl +type DisableLegacyFeaturesAPI struct { + disableLegacyFeaturesImpl } -type EnableIpAccessListsPreviewInterface interface { +type EnableIpAccessListsInterface interface { // Delete the account IP access toggle setting. // @@ -625,9 +609,9 @@ type EnableIpAccessListsPreviewInterface interface { Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) } -func NewEnableIpAccessListsPreview(client *client.DatabricksClient) *EnableIpAccessListsPreviewAPI { - return &EnableIpAccessListsPreviewAPI{ - enableIpAccessListsPreviewImpl: enableIpAccessListsPreviewImpl{ +func NewEnableIpAccessLists(client *client.DatabricksClient) *EnableIpAccessListsAPI { + return &EnableIpAccessListsAPI{ + enableIpAccessListsImpl: enableIpAccessListsImpl{ client: client, }, } @@ -636,11 +620,11 @@ func NewEnableIpAccessListsPreview(client *client.DatabricksClient) *EnableIpAcc // Controls the enforcement of IP access lists for accessing the account // console. Allowing you to enable or disable restricted access based on IP // addresses. -type EnableIpAccessListsPreviewAPI struct { - enableIpAccessListsPreviewImpl +type EnableIpAccessListsAPI struct { + enableIpAccessListsImpl } -type EnhancedSecurityMonitoringPreviewInterface interface { +type EnhancedSecurityMonitoringInterface interface { // Get the enhanced security monitoring setting. // @@ -657,9 +641,9 @@ type EnhancedSecurityMonitoringPreviewInterface interface { Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) } -func NewEnhancedSecurityMonitoringPreview(client *client.DatabricksClient) *EnhancedSecurityMonitoringPreviewAPI { - return &EnhancedSecurityMonitoringPreviewAPI{ - enhancedSecurityMonitoringPreviewImpl: enhancedSecurityMonitoringPreviewImpl{ +func NewEnhancedSecurityMonitoring(client *client.DatabricksClient) *EnhancedSecurityMonitoringAPI { + return &EnhancedSecurityMonitoringAPI{ + enhancedSecurityMonitoringImpl: enhancedSecurityMonitoringImpl{ client: client, }, } @@ -672,11 +656,11 @@ func NewEnhancedSecurityMonitoringPreview(client *client.DatabricksClient) *Enha // // If the compliance security profile is disabled, you can enable or disable // this setting and it is not permanent. -type EnhancedSecurityMonitoringPreviewAPI struct { - enhancedSecurityMonitoringPreviewImpl +type EnhancedSecurityMonitoringAPI struct { + enhancedSecurityMonitoringImpl } -type EsmEnablementAccountPreviewInterface interface { +type EsmEnablementAccountInterface interface { // Get the enhanced security monitoring setting for new workspaces. // @@ -690,9 +674,9 @@ type EsmEnablementAccountPreviewInterface interface { Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) } -func NewEsmEnablementAccountPreview(client *client.DatabricksClient) *EsmEnablementAccountPreviewAPI { - return &EsmEnablementAccountPreviewAPI{ - esmEnablementAccountPreviewImpl: esmEnablementAccountPreviewImpl{ +func NewEsmEnablementAccount(client *client.DatabricksClient) *EsmEnablementAccountAPI { + return &EsmEnablementAccountAPI{ + esmEnablementAccountImpl: esmEnablementAccountImpl{ client: client, }, } @@ -703,11 +687,11 @@ func NewEsmEnablementAccountPreview(client *client.DatabricksClient) *EsmEnablem // account-level setting is disabled for new workspaces. 
After workspace // creation, account admins can enable enhanced security monitoring individually // for each workspace. -type EsmEnablementAccountPreviewAPI struct { - esmEnablementAccountPreviewImpl +type EsmEnablementAccountAPI struct { + esmEnablementAccountImpl } -type IpAccessListsPreviewInterface interface { +type IpAccessListsInterface interface { // Create access list. // @@ -763,7 +747,7 @@ type IpAccessListsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]IpAccessListInfo, error) - // IpAccessListInfoLabelToListIdMap calls [IpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. + // IpAccessListInfoLabelToListIdMap calls [IpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // @@ -772,7 +756,7 @@ type IpAccessListsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) - // GetByLabel calls [IpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. + // GetByLabel calls [IpAccessListsAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // @@ -819,9 +803,9 @@ type IpAccessListsPreviewInterface interface { Update(ctx context.Context, request UpdateIpAccessList) error } -func NewIpAccessListsPreview(client *client.DatabricksClient) *IpAccessListsPreviewAPI { - return &IpAccessListsPreviewAPI{ - ipAccessListsPreviewImpl: ipAccessListsPreviewImpl{ +func NewIpAccessLists(client *client.DatabricksClient) *IpAccessListsAPI { + return &IpAccessListsAPI{ + ipAccessListsImpl: ipAccessListsImpl{ client: client, }, } @@ -848,15 +832,15 @@ func NewIpAccessListsPreview(client *client.DatabricksClient) *IpAccessListsPrev // // After changes to the IP access list feature, it can take a few minutes for // changes to take effect. -type IpAccessListsPreviewAPI struct { - ipAccessListsPreviewImpl +type IpAccessListsAPI struct { + ipAccessListsImpl } // Delete access list. // // Deletes an IP access list, specified by its list ID. -func (a *IpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { - return a.ipAccessListsPreviewImpl.Delete(ctx, DeleteIpAccessListRequest{ +func (a *IpAccessListsAPI) DeleteByIpAccessListId(ctx context.Context, ipAccessListId string) error { + return a.ipAccessListsImpl.Delete(ctx, DeleteIpAccessListRequest{ IpAccessListId: ipAccessListId, }) } @@ -864,20 +848,20 @@ func (a *IpAccessListsPreviewAPI) DeleteByIpAccessListId(ctx context.Context, ip // Get access list. // // Gets an IP access list, specified by its list ID. 
-func (a *IpAccessListsPreviewAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*FetchIpAccessListResponse, error) { - return a.ipAccessListsPreviewImpl.Get(ctx, GetIpAccessListRequest{ +func (a *IpAccessListsAPI) GetByIpAccessListId(ctx context.Context, ipAccessListId string) (*FetchIpAccessListResponse, error) { + return a.ipAccessListsImpl.Get(ctx, GetIpAccessListRequest{ IpAccessListId: ipAccessListId, }) } -// IpAccessListInfoLabelToListIdMap calls [IpAccessListsPreviewAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. +// IpAccessListInfoLabelToListIdMap calls [IpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // // Note: All [IpAccessListInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *IpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { +func (a *IpAccessListsAPI) IpAccessListInfoLabelToListIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx) @@ -895,14 +879,14 @@ func (a *IpAccessListsPreviewAPI) IpAccessListInfoLabelToListIdMap(ctx context.C return mapping, nil } -// GetByLabel calls [IpAccessListsPreviewAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. +// GetByLabel calls [IpAccessListsAPI.IpAccessListInfoLabelToListIdMap] and returns a single [IpAccessListInfo]. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. // // Note: All [IpAccessListInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *IpAccessListsPreviewAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { +func (a *IpAccessListsAPI) GetByLabel(ctx context.Context, name string) (*IpAccessListInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -923,7 +907,7 @@ func (a *IpAccessListsPreviewAPI) GetByLabel(ctx context.Context, name string) ( return &alternatives[0], nil } -type NetworkConnectivityPreviewInterface interface { +type NetworkConnectivityInterface interface { // Create a network connectivity configuration. 
CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) @@ -1026,9 +1010,9 @@ type NetworkConnectivityPreviewInterface interface { ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) } -func NewNetworkConnectivityPreview(client *client.DatabricksClient) *NetworkConnectivityPreviewAPI { - return &NetworkConnectivityPreviewAPI{ - networkConnectivityPreviewImpl: networkConnectivityPreviewImpl{ +func NewNetworkConnectivity(client *client.DatabricksClient) *NetworkConnectivityAPI { + return &NetworkConnectivityAPI{ + networkConnectivityImpl: networkConnectivityImpl{ client: client, }, } @@ -1036,15 +1020,15 @@ func NewNetworkConnectivityPreview(client *client.DatabricksClient) *NetworkConn // These APIs provide configurations for the network connectivity of your // workspaces for serverless compute resources. -type NetworkConnectivityPreviewAPI struct { - networkConnectivityPreviewImpl +type NetworkConnectivityAPI struct { + networkConnectivityImpl } // Delete a network connectivity configuration. // // Deletes a network connectivity configuration. -func (a *NetworkConnectivityPreviewAPI) DeleteNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) error { - return a.networkConnectivityPreviewImpl.DeleteNetworkConnectivityConfiguration(ctx, DeleteNetworkConnectivityConfigurationRequest{ +func (a *NetworkConnectivityAPI) DeleteNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) error { + return a.networkConnectivityImpl.DeleteNetworkConnectivityConfiguration(ctx, DeleteNetworkConnectivityConfigurationRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, }) } @@ -1057,8 +1041,8 @@ func (a *NetworkConnectivityPreviewAPI) DeleteNetworkConnectivityConfigurationBy // deactivation. When a private endpoint is deactivated, the `deactivated` field // is set to `true` and the private endpoint is not available to your serverless // compute resources. -func (a *NetworkConnectivityPreviewAPI) DeletePrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { - return a.networkConnectivityPreviewImpl.DeletePrivateEndpointRule(ctx, DeletePrivateEndpointRuleRequest{ +func (a *NetworkConnectivityAPI) DeletePrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { + return a.networkConnectivityImpl.DeletePrivateEndpointRule(ctx, DeletePrivateEndpointRuleRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, PrivateEndpointRuleId: privateEndpointRuleId, }) @@ -1067,8 +1051,8 @@ func (a *NetworkConnectivityPreviewAPI) DeletePrivateEndpointRuleByNetworkConnec // Get a network connectivity configuration. // // Gets a network connectivity configuration. 
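A sketch of the renamed by-ID wrappers for network connectivity configurations; the config ID is hypothetical and databricksClient comes from the chain in the earlier sketch.

// Sketch only: the NCC ID below is illustrative.
func inspectNcc(ctx context.Context, databricksClient *client.DatabricksClient) error {
	nc := settingspreview.NewNetworkConnectivity(databricksClient)
	nccID := "0123-4567-89ab" // hypothetical network connectivity config ID
	conf, err := nc.GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx, nccID)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", conf)
	// Wraps internalListPrivateEndpointRules with the config ID as the only filter.
	rules, err := nc.ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx, nccID)
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", rules)
	return nil
}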
-func (a *NetworkConnectivityPreviewAPI) GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) { - return a.networkConnectivityPreviewImpl.GetNetworkConnectivityConfiguration(ctx, GetNetworkConnectivityConfigurationRequest{ +func (a *NetworkConnectivityAPI) GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) { + return a.networkConnectivityImpl.GetNetworkConnectivityConfiguration(ctx, GetNetworkConnectivityConfigurationRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, }) } @@ -1076,8 +1060,8 @@ func (a *NetworkConnectivityPreviewAPI) GetNetworkConnectivityConfigurationByNet // Get a private endpoint rule. // // Gets the private endpoint rule. -func (a *NetworkConnectivityPreviewAPI) GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { - return a.networkConnectivityPreviewImpl.GetPrivateEndpointRule(ctx, GetPrivateEndpointRuleRequest{ +func (a *NetworkConnectivityAPI) GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { + return a.networkConnectivityImpl.GetPrivateEndpointRule(ctx, GetPrivateEndpointRuleRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, PrivateEndpointRuleId: privateEndpointRuleId, }) @@ -1086,13 +1070,13 @@ func (a *NetworkConnectivityPreviewAPI) GetPrivateEndpointRuleByNetworkConnectiv // List private endpoint rules. // // Gets an array of private endpoint rules. -func (a *NetworkConnectivityPreviewAPI) ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) { - return a.networkConnectivityPreviewImpl.internalListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{ +func (a *NetworkConnectivityAPI) ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) { + return a.networkConnectivityImpl.internalListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, }) } -type NotificationDestinationsPreviewInterface interface { +type NotificationDestinationsInterface interface { // Create a notification destination. // @@ -1140,9 +1124,9 @@ type NotificationDestinationsPreviewInterface interface { Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) } -func NewNotificationDestinationsPreview(client *client.DatabricksClient) *NotificationDestinationsPreviewAPI { - return &NotificationDestinationsPreviewAPI{ - notificationDestinationsPreviewImpl: notificationDestinationsPreviewImpl{ +func NewNotificationDestinations(client *client.DatabricksClient) *NotificationDestinationsAPI { + return &NotificationDestinationsAPI{ + notificationDestinationsImpl: notificationDestinationsImpl{ client: client, }, } @@ -1153,15 +1137,15 @@ func NewNotificationDestinationsPreview(client *client.DatabricksClient) *Notifi // send notifications for query alerts and jobs to destinations outside of // Databricks. 
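A sketch pairing the renamed GetById/DeleteById wrappers that follow just below; the destination ID is supplied by the caller, and databricksClient comes from the chain in the earlier sketch.

// Sketch only.
func pruneDestination(ctx context.Context, databricksClient *client.DatabricksClient, id string) error {
	nd := settingspreview.NewNotificationDestinations(databricksClient)
	dest, err := nd.GetById(ctx, id)
	if err != nil {
		return err
	}
	fmt.Printf("deleting %+v\n", dest)
	// Requires workspace admin permissions, per the doc comment below.
	return nd.DeleteById(ctx, id)
}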
Only workspace admins can create, update, and delete notification // destinations. -type NotificationDestinationsPreviewAPI struct { - notificationDestinationsPreviewImpl +type NotificationDestinationsAPI struct { + notificationDestinationsImpl } // Delete a notification destination. // // Deletes a notification destination. Requires workspace admin permissions. -func (a *NotificationDestinationsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.notificationDestinationsPreviewImpl.Delete(ctx, DeleteNotificationDestinationRequest{ +func (a *NotificationDestinationsAPI) DeleteById(ctx context.Context, id string) error { + return a.notificationDestinationsImpl.Delete(ctx, DeleteNotificationDestinationRequest{ Id: id, }) } @@ -1169,13 +1153,13 @@ func (a *NotificationDestinationsPreviewAPI) DeleteById(ctx context.Context, id // Get a notification destination. // // Gets a notification destination. -func (a *NotificationDestinationsPreviewAPI) GetById(ctx context.Context, id string) (*NotificationDestination, error) { - return a.notificationDestinationsPreviewImpl.Get(ctx, GetNotificationDestinationRequest{ +func (a *NotificationDestinationsAPI) GetById(ctx context.Context, id string) (*NotificationDestination, error) { + return a.notificationDestinationsImpl.Get(ctx, GetNotificationDestinationRequest{ Id: id, }) } -type PersonalComputePreviewInterface interface { +type PersonalComputeInterface interface { // Delete Personal Compute setting. // @@ -1193,9 +1177,9 @@ type PersonalComputePreviewInterface interface { Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) } -func NewPersonalComputePreview(client *client.DatabricksClient) *PersonalComputePreviewAPI { - return &PersonalComputePreviewAPI{ - personalComputePreviewImpl: personalComputePreviewImpl{ +func NewPersonalCompute(client *client.DatabricksClient) *PersonalComputeAPI { + return &PersonalComputeAPI{ + personalComputeImpl: personalComputeImpl{ client: client, }, } @@ -1210,11 +1194,11 @@ func NewPersonalComputePreview(client *client.DatabricksClient) *PersonalCompute // has a default value, this setting is present on all accounts even though it's // never set on a given account. Deletion reverts the value of the setting back // to the default value. -type PersonalComputePreviewAPI struct { - personalComputePreviewImpl +type PersonalComputeAPI struct { + personalComputeImpl } -type RestrictWorkspaceAdminsPreviewInterface interface { +type RestrictWorkspaceAdminsInterface interface { // Delete the restrict workspace admins setting. // @@ -1241,9 +1225,9 @@ type RestrictWorkspaceAdminsPreviewInterface interface { Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) } -func NewRestrictWorkspaceAdminsPreview(client *client.DatabricksClient) *RestrictWorkspaceAdminsPreviewAPI { - return &RestrictWorkspaceAdminsPreviewAPI{ - restrictWorkspaceAdminsPreviewImpl: restrictWorkspaceAdminsPreviewImpl{ +func NewRestrictWorkspaceAdmins(client *client.DatabricksClient) *RestrictWorkspaceAdminsAPI { + return &RestrictWorkspaceAdminsAPI{ + restrictWorkspaceAdminsImpl: restrictWorkspaceAdminsImpl{ client: client, }, } @@ -1261,8 +1245,8 @@ func NewRestrictWorkspaceAdminsPreview(client *client.DatabricksClient) *Restric // User role on. They can also only change a job owner to themselves. 
And they // can change the job run_as setting to themselves or to a service principal on // which they have the Service Principal User role. -type RestrictWorkspaceAdminsPreviewAPI struct { - restrictWorkspaceAdminsPreviewImpl +type RestrictWorkspaceAdminsAPI struct { + restrictWorkspaceAdminsImpl } type SettingsInterface interface { @@ -1282,24 +1266,7 @@ type SettingsAPI struct { settingsImpl } -type SettingsPreviewInterface interface { -} - -func NewSettingsPreview(client *client.DatabricksClient) *SettingsPreviewAPI { - return &SettingsPreviewAPI{ - settingsPreviewImpl: settingsPreviewImpl{ - client: client, - }, - } -} - -// Workspace Settings API allows users to manage settings at the workspace -// level. -type SettingsPreviewAPI struct { - settingsPreviewImpl -} - -type TokenManagementPreviewInterface interface { +type TokenManagementInterface interface { // Create on-behalf token. // @@ -1351,7 +1318,7 @@ type TokenManagementPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) - // TokenInfoCommentToTokenIdMap calls [TokenManagementPreviewAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. + // TokenInfoCommentToTokenIdMap calls [TokenManagementAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. // // Returns an error if there's more than one [TokenInfo] with the same .Comment. // @@ -1360,7 +1327,7 @@ type TokenManagementPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. TokenInfoCommentToTokenIdMap(ctx context.Context, request ListTokenManagementRequest) (map[string]string, error) - // GetByComment calls [TokenManagementPreviewAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. + // GetByComment calls [TokenManagementAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. // // Returns an error if there's more than one [TokenInfo] with the same .Comment. // @@ -1383,9 +1350,9 @@ type TokenManagementPreviewInterface interface { UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) } -func NewTokenManagementPreview(client *client.DatabricksClient) *TokenManagementPreviewAPI { - return &TokenManagementPreviewAPI{ - tokenManagementPreviewImpl: tokenManagementPreviewImpl{ +func NewTokenManagement(client *client.DatabricksClient) *TokenManagementAPI { + return &TokenManagementAPI{ + tokenManagementImpl: tokenManagementImpl{ client: client, }, } @@ -1394,15 +1361,15 @@ func NewTokenManagementPreview(client *client.DatabricksClient) *TokenManagement // Enables administrators to get all tokens and delete tokens for other users. // Admins can either get every token, get a specific token by ID, or get all // tokens for a particular user. -type TokenManagementPreviewAPI struct { - tokenManagementPreviewImpl +type TokenManagementAPI struct { + tokenManagementImpl } // Delete a token. // // Deletes a token, specified by its ID. 
-func (a *TokenManagementPreviewAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { - return a.tokenManagementPreviewImpl.Delete(ctx, DeleteTokenManagementRequest{ +func (a *TokenManagementAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { + return a.tokenManagementImpl.Delete(ctx, DeleteTokenManagementRequest{ TokenId: tokenId, }) } @@ -1410,20 +1377,20 @@ func (a *TokenManagementPreviewAPI) DeleteByTokenId(ctx context.Context, tokenId // Get token info. // // Gets information about a token, specified by its ID. -func (a *TokenManagementPreviewAPI) GetByTokenId(ctx context.Context, tokenId string) (*GetTokenResponse, error) { - return a.tokenManagementPreviewImpl.Get(ctx, GetTokenManagementRequest{ +func (a *TokenManagementAPI) GetByTokenId(ctx context.Context, tokenId string) (*GetTokenResponse, error) { + return a.tokenManagementImpl.Get(ctx, GetTokenManagementRequest{ TokenId: tokenId, }) } -// TokenInfoCommentToTokenIdMap calls [TokenManagementPreviewAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. +// TokenInfoCommentToTokenIdMap calls [TokenManagementAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. // // Returns an error if there's more than one [TokenInfo] with the same .Comment. // // Note: All [TokenInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *TokenManagementPreviewAPI) TokenInfoCommentToTokenIdMap(ctx context.Context, request ListTokenManagementRequest) (map[string]string, error) { +func (a *TokenManagementAPI) TokenInfoCommentToTokenIdMap(ctx context.Context, request ListTokenManagementRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1441,14 +1408,14 @@ func (a *TokenManagementPreviewAPI) TokenInfoCommentToTokenIdMap(ctx context.Con return mapping, nil } -// GetByComment calls [TokenManagementPreviewAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. +// GetByComment calls [TokenManagementAPI.TokenInfoCommentToTokenIdMap] and returns a single [TokenInfo]. // // Returns an error if there's more than one [TokenInfo] with the same .Comment. // // Note: All [TokenInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *TokenManagementPreviewAPI) GetByComment(ctx context.Context, name string) (*TokenInfo, error) { +func (a *TokenManagementAPI) GetByComment(ctx context.Context, name string) (*TokenInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListTokenManagementRequest{}) if err != nil { @@ -1469,7 +1436,7 @@ func (a *TokenManagementPreviewAPI) GetByComment(ctx context.Context, name strin return &alternatives[0], nil } -type TokensPreviewInterface interface { +type TokensInterface interface { // Create a user token. // @@ -1509,7 +1476,7 @@ type TokensPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]PublicTokenInfo, error) - // PublicTokenInfoCommentToTokenIdMap calls [TokensPreviewAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. 
+ // PublicTokenInfoCommentToTokenIdMap calls [TokensAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. // // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. // @@ -1518,7 +1485,7 @@ type TokensPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. PublicTokenInfoCommentToTokenIdMap(ctx context.Context) (map[string]string, error) - // GetByComment calls [TokensPreviewAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. + // GetByComment calls [TokensAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. // // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. // @@ -1528,9 +1495,9 @@ type TokensPreviewInterface interface { GetByComment(ctx context.Context, name string) (*PublicTokenInfo, error) } -func NewTokensPreview(client *client.DatabricksClient) *TokensPreviewAPI { - return &TokensPreviewAPI{ - tokensPreviewImpl: tokensPreviewImpl{ +func NewTokens(client *client.DatabricksClient) *TokensAPI { + return &TokensAPI{ + tokensImpl: tokensImpl{ client: client, }, } @@ -1538,8 +1505,8 @@ func NewTokensPreview(client *client.DatabricksClient) *TokensPreviewAPI { // The Token API allows you to create, list, and revoke tokens that can be used // to authenticate and access Databricks REST APIs. -type TokensPreviewAPI struct { - tokensPreviewImpl +type TokensAPI struct { + tokensImpl } // Revoke token. @@ -1548,20 +1515,20 @@ type TokensPreviewAPI struct { // // If a token with the specified ID is not valid, this call returns an error // **RESOURCE_DOES_NOT_EXIST**. -func (a *TokensPreviewAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { - return a.tokensPreviewImpl.Delete(ctx, RevokeTokenRequest{ +func (a *TokensAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { + return a.tokensImpl.Delete(ctx, RevokeTokenRequest{ TokenId: tokenId, }) } -// PublicTokenInfoCommentToTokenIdMap calls [TokensPreviewAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. +// PublicTokenInfoCommentToTokenIdMap calls [TokensAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. // // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. // // Note: All [PublicTokenInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *TokensPreviewAPI) PublicTokenInfoCommentToTokenIdMap(ctx context.Context) (map[string]string, error) { +func (a *TokensAPI) PublicTokenInfoCommentToTokenIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx) @@ -1579,14 +1546,14 @@ func (a *TokensPreviewAPI) PublicTokenInfoCommentToTokenIdMap(ctx context.Contex return mapping, nil } -// GetByComment calls [TokensPreviewAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. +// GetByComment calls [TokensAPI.PublicTokenInfoCommentToTokenIdMap] and returns a single [PublicTokenInfo]. // // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. // // Note: All [PublicTokenInfo] instances are loaded into memory before returning matching by name. 
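Illustrative aside: both generated map helpers follow one pattern: list everything eagerly, then build the index and fail on key collisions instead of silently overwriting. A standalone, runnable sketch of that pattern with a stand-in struct (the real PublicTokenInfo lives in this package's model.go):

package main

import "fmt"

// publicTokenInfo is a stand-in for the generated PublicTokenInfo model.
type publicTokenInfo struct {
	Comment string
	TokenId string
}

// commentToTokenID mirrors PublicTokenInfoCommentToTokenIdMap: duplicate
// .Comment values are an error rather than a silent overwrite.
func commentToTokenID(items []publicTokenInfo) (map[string]string, error) {
	mapping := make(map[string]string, len(items))
	for _, v := range items {
		if _, ok := mapping[v.Comment]; ok {
			return nil, fmt.Errorf("duplicate .Comment: %s", v.Comment)
		}
		mapping[v.Comment] = v.TokenId
	}
	return mapping, nil
}

func main() {
	m, err := commentToTokenID([]publicTokenInfo{
		{Comment: "ci", TokenId: "t1"},
		{Comment: "dev", TokenId: "t2"},
	})
	fmt.Println(m, err)
}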
// // This method is generated by Databricks SDK Code Generator. -func (a *TokensPreviewAPI) GetByComment(ctx context.Context, name string) (*PublicTokenInfo, error) { +func (a *TokensAPI) GetByComment(ctx context.Context, name string) (*PublicTokenInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -1607,7 +1574,7 @@ func (a *TokensPreviewAPI) GetByComment(ctx context.Context, name string) (*Publ return &alternatives[0], nil } -type WorkspaceConfPreviewInterface interface { +type WorkspaceConfInterface interface { // Check configuration status. // @@ -1621,15 +1588,15 @@ type WorkspaceConfPreviewInterface interface { SetStatus(ctx context.Context, request WorkspaceConf) error } -func NewWorkspaceConfPreview(client *client.DatabricksClient) *WorkspaceConfPreviewAPI { - return &WorkspaceConfPreviewAPI{ - workspaceConfPreviewImpl: workspaceConfPreviewImpl{ +func NewWorkspaceConf(client *client.DatabricksClient) *WorkspaceConfAPI { + return &WorkspaceConfAPI{ + workspaceConfImpl: workspaceConfImpl{ client: client, }, } } // This API allows updating known workspace settings for advanced users. -type WorkspaceConfPreviewAPI struct { - workspaceConfPreviewImpl +type WorkspaceConfAPI struct { + workspaceConfImpl } diff --git a/settings/v2preview/client.go b/settings/v2preview/client.go index add625788..4ee9b3b0d 100755 --- a/settings/v2preview/client.go +++ b/settings/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type AccountIpAccessListsPreviewClient struct { - AccountIpAccessListsPreviewInterface +type AccountIpAccessListsClient struct { + AccountIpAccessListsInterface Config *config.Config } -func NewAccountIpAccessListsPreviewClient(cfg *config.Config) (*AccountIpAccessListsPreviewClient, error) { +func NewAccountIpAccessListsClient(cfg *config.Config) (*AccountIpAccessListsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -34,9 +34,9 @@ func NewAccountIpAccessListsPreviewClient(cfg *config.Config) (*AccountIpAccessL return nil, err } - return &AccountIpAccessListsPreviewClient{ - Config: cfg, - AccountIpAccessListsPreviewInterface: NewAccountIpAccessListsPreview(apiClient), + return &AccountIpAccessListsClient{ + Config: cfg, + AccountIpAccessListsInterface: NewAccountIpAccessLists(apiClient), }, nil } @@ -70,43 +70,13 @@ func NewAccountSettingsClient(cfg *config.Config) (*AccountSettingsClient, error }, nil } -type AccountSettingsPreviewClient struct { - AccountSettingsPreviewInterface - - Config *config.Config -} - -func NewAccountSettingsPreviewClient(cfg *config.Config) (*AccountSettingsPreviewClient, error) { - if cfg == nil { - cfg = &config.Config{} - } - - err := cfg.EnsureResolved() - if err != nil { - return nil, err - } - - if cfg.AccountID == "" || !cfg.IsAccountClient() { - return nil, errors.New("invalid configuration: please provide a valid account config for the requested account service client") - } - apiClient, err := client.New(cfg) - if err != nil { - return nil, err - } - - return &AccountSettingsPreviewClient{ - Config: cfg, - AccountSettingsPreviewInterface: NewAccountSettingsPreview(apiClient), - }, nil -} - -type AibiDashboardEmbeddingAccessPolicyPreviewClient struct { - AibiDashboardEmbeddingAccessPolicyPreviewInterface +type AibiDashboardEmbeddingAccessPolicyClient struct { + AibiDashboardEmbeddingAccessPolicyInterface Config *config.Config apiClient *httpclient.ApiClient } -func 
NewAibiDashboardEmbeddingAccessPolicyPreviewClient(cfg *config.Config) (*AibiDashboardEmbeddingAccessPolicyPreviewClient, error) { +func NewAibiDashboardEmbeddingAccessPolicyClient(cfg *config.Config) (*AibiDashboardEmbeddingAccessPolicyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -127,20 +97,20 @@ func NewAibiDashboardEmbeddingAccessPolicyPreviewClient(cfg *config.Config) (*Ai return nil, err } - return &AibiDashboardEmbeddingAccessPolicyPreviewClient{ + return &AibiDashboardEmbeddingAccessPolicyClient{ Config: cfg, apiClient: apiClient, - AibiDashboardEmbeddingAccessPolicyPreviewInterface: NewAibiDashboardEmbeddingAccessPolicyPreview(databricksClient), + AibiDashboardEmbeddingAccessPolicyInterface: NewAibiDashboardEmbeddingAccessPolicy(databricksClient), }, nil } -type AibiDashboardEmbeddingApprovedDomainsPreviewClient struct { - AibiDashboardEmbeddingApprovedDomainsPreviewInterface +type AibiDashboardEmbeddingApprovedDomainsClient struct { + AibiDashboardEmbeddingApprovedDomainsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAibiDashboardEmbeddingApprovedDomainsPreviewClient(cfg *config.Config) (*AibiDashboardEmbeddingApprovedDomainsPreviewClient, error) { +func NewAibiDashboardEmbeddingApprovedDomainsClient(cfg *config.Config) (*AibiDashboardEmbeddingApprovedDomainsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -161,20 +131,20 @@ func NewAibiDashboardEmbeddingApprovedDomainsPreviewClient(cfg *config.Config) ( return nil, err } - return &AibiDashboardEmbeddingApprovedDomainsPreviewClient{ + return &AibiDashboardEmbeddingApprovedDomainsClient{ Config: cfg, apiClient: apiClient, - AibiDashboardEmbeddingApprovedDomainsPreviewInterface: NewAibiDashboardEmbeddingApprovedDomainsPreview(databricksClient), + AibiDashboardEmbeddingApprovedDomainsInterface: NewAibiDashboardEmbeddingApprovedDomains(databricksClient), }, nil } -type AutomaticClusterUpdatePreviewClient struct { - AutomaticClusterUpdatePreviewInterface +type AutomaticClusterUpdateClient struct { + AutomaticClusterUpdateInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAutomaticClusterUpdatePreviewClient(cfg *config.Config) (*AutomaticClusterUpdatePreviewClient, error) { +func NewAutomaticClusterUpdateClient(cfg *config.Config) (*AutomaticClusterUpdateClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -195,20 +165,20 @@ func NewAutomaticClusterUpdatePreviewClient(cfg *config.Config) (*AutomaticClust return nil, err } - return &AutomaticClusterUpdatePreviewClient{ - Config: cfg, - apiClient: apiClient, - AutomaticClusterUpdatePreviewInterface: NewAutomaticClusterUpdatePreview(databricksClient), + return &AutomaticClusterUpdateClient{ + Config: cfg, + apiClient: apiClient, + AutomaticClusterUpdateInterface: NewAutomaticClusterUpdate(databricksClient), }, nil } -type ComplianceSecurityProfilePreviewClient struct { - ComplianceSecurityProfilePreviewInterface +type ComplianceSecurityProfileClient struct { + ComplianceSecurityProfileInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewComplianceSecurityProfilePreviewClient(cfg *config.Config) (*ComplianceSecurityProfilePreviewClient, error) { +func NewComplianceSecurityProfileClient(cfg *config.Config) (*ComplianceSecurityProfileClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -229,20 +199,20 @@ func NewComplianceSecurityProfilePreviewClient(cfg *config.Config) (*ComplianceS return nil, err } - return &ComplianceSecurityProfilePreviewClient{ - Config: cfg, - 
apiClient: apiClient, - ComplianceSecurityProfilePreviewInterface: NewComplianceSecurityProfilePreview(databricksClient), + return &ComplianceSecurityProfileClient{ + Config: cfg, + apiClient: apiClient, + ComplianceSecurityProfileInterface: NewComplianceSecurityProfile(databricksClient), }, nil } -type CredentialsManagerPreviewClient struct { - CredentialsManagerPreviewInterface +type CredentialsManagerClient struct { + CredentialsManagerInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewCredentialsManagerPreviewClient(cfg *config.Config) (*CredentialsManagerPreviewClient, error) { +func NewCredentialsManagerClient(cfg *config.Config) (*CredentialsManagerClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -263,20 +233,20 @@ func NewCredentialsManagerPreviewClient(cfg *config.Config) (*CredentialsManager return nil, err } - return &CredentialsManagerPreviewClient{ - Config: cfg, - apiClient: apiClient, - CredentialsManagerPreviewInterface: NewCredentialsManagerPreview(databricksClient), + return &CredentialsManagerClient{ + Config: cfg, + apiClient: apiClient, + CredentialsManagerInterface: NewCredentialsManager(databricksClient), }, nil } -type CspEnablementAccountPreviewClient struct { - CspEnablementAccountPreviewInterface +type CspEnablementAccountClient struct { + CspEnablementAccountInterface Config *config.Config } -func NewCspEnablementAccountPreviewClient(cfg *config.Config) (*CspEnablementAccountPreviewClient, error) { +func NewCspEnablementAccountClient(cfg *config.Config) (*CspEnablementAccountClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -294,19 +264,19 @@ func NewCspEnablementAccountPreviewClient(cfg *config.Config) (*CspEnablementAcc return nil, err } - return &CspEnablementAccountPreviewClient{ - Config: cfg, - CspEnablementAccountPreviewInterface: NewCspEnablementAccountPreview(apiClient), + return &CspEnablementAccountClient{ + Config: cfg, + CspEnablementAccountInterface: NewCspEnablementAccount(apiClient), }, nil } -type DefaultNamespacePreviewClient struct { - DefaultNamespacePreviewInterface +type DefaultNamespaceClient struct { + DefaultNamespaceInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDefaultNamespacePreviewClient(cfg *config.Config) (*DefaultNamespacePreviewClient, error) { +func NewDefaultNamespaceClient(cfg *config.Config) (*DefaultNamespaceClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -327,20 +297,20 @@ func NewDefaultNamespacePreviewClient(cfg *config.Config) (*DefaultNamespacePrev return nil, err } - return &DefaultNamespacePreviewClient{ - Config: cfg, - apiClient: apiClient, - DefaultNamespacePreviewInterface: NewDefaultNamespacePreview(databricksClient), + return &DefaultNamespaceClient{ + Config: cfg, + apiClient: apiClient, + DefaultNamespaceInterface: NewDefaultNamespace(databricksClient), }, nil } -type DisableLegacyAccessPreviewClient struct { - DisableLegacyAccessPreviewInterface +type DisableLegacyAccessClient struct { + DisableLegacyAccessInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDisableLegacyAccessPreviewClient(cfg *config.Config) (*DisableLegacyAccessPreviewClient, error) { +func NewDisableLegacyAccessClient(cfg *config.Config) (*DisableLegacyAccessClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -361,20 +331,20 @@ func NewDisableLegacyAccessPreviewClient(cfg *config.Config) (*DisableLegacyAcce return nil, err } - return &DisableLegacyAccessPreviewClient{ - Config: cfg, - apiClient: apiClient, - 
DisableLegacyAccessPreviewInterface: NewDisableLegacyAccessPreview(databricksClient), + return &DisableLegacyAccessClient{ + Config: cfg, + apiClient: apiClient, + DisableLegacyAccessInterface: NewDisableLegacyAccess(databricksClient), }, nil } -type DisableLegacyDbfsPreviewClient struct { - DisableLegacyDbfsPreviewInterface +type DisableLegacyDbfsClient struct { + DisableLegacyDbfsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDisableLegacyDbfsPreviewClient(cfg *config.Config) (*DisableLegacyDbfsPreviewClient, error) { +func NewDisableLegacyDbfsClient(cfg *config.Config) (*DisableLegacyDbfsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -395,20 +365,20 @@ func NewDisableLegacyDbfsPreviewClient(cfg *config.Config) (*DisableLegacyDbfsPr return nil, err } - return &DisableLegacyDbfsPreviewClient{ - Config: cfg, - apiClient: apiClient, - DisableLegacyDbfsPreviewInterface: NewDisableLegacyDbfsPreview(databricksClient), + return &DisableLegacyDbfsClient{ + Config: cfg, + apiClient: apiClient, + DisableLegacyDbfsInterface: NewDisableLegacyDbfs(databricksClient), }, nil } -type DisableLegacyFeaturesPreviewClient struct { - DisableLegacyFeaturesPreviewInterface +type DisableLegacyFeaturesClient struct { + DisableLegacyFeaturesInterface Config *config.Config } -func NewDisableLegacyFeaturesPreviewClient(cfg *config.Config) (*DisableLegacyFeaturesPreviewClient, error) { +func NewDisableLegacyFeaturesClient(cfg *config.Config) (*DisableLegacyFeaturesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -426,19 +396,19 @@ func NewDisableLegacyFeaturesPreviewClient(cfg *config.Config) (*DisableLegacyFe return nil, err } - return &DisableLegacyFeaturesPreviewClient{ - Config: cfg, - DisableLegacyFeaturesPreviewInterface: NewDisableLegacyFeaturesPreview(apiClient), + return &DisableLegacyFeaturesClient{ + Config: cfg, + DisableLegacyFeaturesInterface: NewDisableLegacyFeatures(apiClient), }, nil } -type EnableIpAccessListsPreviewClient struct { - EnableIpAccessListsPreviewInterface +type EnableIpAccessListsClient struct { + EnableIpAccessListsInterface Config *config.Config } -func NewEnableIpAccessListsPreviewClient(cfg *config.Config) (*EnableIpAccessListsPreviewClient, error) { +func NewEnableIpAccessListsClient(cfg *config.Config) (*EnableIpAccessListsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -456,19 +426,19 @@ func NewEnableIpAccessListsPreviewClient(cfg *config.Config) (*EnableIpAccessLis return nil, err } - return &EnableIpAccessListsPreviewClient{ - Config: cfg, - EnableIpAccessListsPreviewInterface: NewEnableIpAccessListsPreview(apiClient), + return &EnableIpAccessListsClient{ + Config: cfg, + EnableIpAccessListsInterface: NewEnableIpAccessLists(apiClient), }, nil } -type EnhancedSecurityMonitoringPreviewClient struct { - EnhancedSecurityMonitoringPreviewInterface +type EnhancedSecurityMonitoringClient struct { + EnhancedSecurityMonitoringInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewEnhancedSecurityMonitoringPreviewClient(cfg *config.Config) (*EnhancedSecurityMonitoringPreviewClient, error) { +func NewEnhancedSecurityMonitoringClient(cfg *config.Config) (*EnhancedSecurityMonitoringClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -489,20 +459,20 @@ func NewEnhancedSecurityMonitoringPreviewClient(cfg *config.Config) (*EnhancedSe return nil, err } - return &EnhancedSecurityMonitoringPreviewClient{ - Config: cfg, - apiClient: apiClient, - EnhancedSecurityMonitoringPreviewInterface: 
NewEnhancedSecurityMonitoringPreview(databricksClient), + return &EnhancedSecurityMonitoringClient{ + Config: cfg, + apiClient: apiClient, + EnhancedSecurityMonitoringInterface: NewEnhancedSecurityMonitoring(databricksClient), }, nil } -type EsmEnablementAccountPreviewClient struct { - EsmEnablementAccountPreviewInterface +type EsmEnablementAccountClient struct { + EsmEnablementAccountInterface Config *config.Config } -func NewEsmEnablementAccountPreviewClient(cfg *config.Config) (*EsmEnablementAccountPreviewClient, error) { +func NewEsmEnablementAccountClient(cfg *config.Config) (*EsmEnablementAccountClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -520,19 +490,19 @@ func NewEsmEnablementAccountPreviewClient(cfg *config.Config) (*EsmEnablementAcc return nil, err } - return &EsmEnablementAccountPreviewClient{ - Config: cfg, - EsmEnablementAccountPreviewInterface: NewEsmEnablementAccountPreview(apiClient), + return &EsmEnablementAccountClient{ + Config: cfg, + EsmEnablementAccountInterface: NewEsmEnablementAccount(apiClient), }, nil } -type IpAccessListsPreviewClient struct { - IpAccessListsPreviewInterface +type IpAccessListsClient struct { + IpAccessListsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewIpAccessListsPreviewClient(cfg *config.Config) (*IpAccessListsPreviewClient, error) { +func NewIpAccessListsClient(cfg *config.Config) (*IpAccessListsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -553,20 +523,20 @@ func NewIpAccessListsPreviewClient(cfg *config.Config) (*IpAccessListsPreviewCli return nil, err } - return &IpAccessListsPreviewClient{ - Config: cfg, - apiClient: apiClient, - IpAccessListsPreviewInterface: NewIpAccessListsPreview(databricksClient), + return &IpAccessListsClient{ + Config: cfg, + apiClient: apiClient, + IpAccessListsInterface: NewIpAccessLists(databricksClient), }, nil } -type NetworkConnectivityPreviewClient struct { - NetworkConnectivityPreviewInterface +type NetworkConnectivityClient struct { + NetworkConnectivityInterface Config *config.Config } -func NewNetworkConnectivityPreviewClient(cfg *config.Config) (*NetworkConnectivityPreviewClient, error) { +func NewNetworkConnectivityClient(cfg *config.Config) (*NetworkConnectivityClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -584,19 +554,19 @@ func NewNetworkConnectivityPreviewClient(cfg *config.Config) (*NetworkConnectivi return nil, err } - return &NetworkConnectivityPreviewClient{ - Config: cfg, - NetworkConnectivityPreviewInterface: NewNetworkConnectivityPreview(apiClient), + return &NetworkConnectivityClient{ + Config: cfg, + NetworkConnectivityInterface: NewNetworkConnectivity(apiClient), }, nil } -type NotificationDestinationsPreviewClient struct { - NotificationDestinationsPreviewInterface +type NotificationDestinationsClient struct { + NotificationDestinationsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewNotificationDestinationsPreviewClient(cfg *config.Config) (*NotificationDestinationsPreviewClient, error) { +func NewNotificationDestinationsClient(cfg *config.Config) (*NotificationDestinationsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -617,20 +587,20 @@ func NewNotificationDestinationsPreviewClient(cfg *config.Config) (*Notification return nil, err } - return &NotificationDestinationsPreviewClient{ - Config: cfg, - apiClient: apiClient, - NotificationDestinationsPreviewInterface: NewNotificationDestinationsPreview(databricksClient), + return &NotificationDestinationsClient{ + Config: cfg, 
+ apiClient: apiClient, + NotificationDestinationsInterface: NewNotificationDestinations(databricksClient), }, nil } -type PersonalComputePreviewClient struct { - PersonalComputePreviewInterface +type PersonalComputeClient struct { + PersonalComputeInterface Config *config.Config } -func NewPersonalComputePreviewClient(cfg *config.Config) (*PersonalComputePreviewClient, error) { +func NewPersonalComputeClient(cfg *config.Config) (*PersonalComputeClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -648,19 +618,19 @@ func NewPersonalComputePreviewClient(cfg *config.Config) (*PersonalComputePrevie return nil, err } - return &PersonalComputePreviewClient{ - Config: cfg, - PersonalComputePreviewInterface: NewPersonalComputePreview(apiClient), + return &PersonalComputeClient{ + Config: cfg, + PersonalComputeInterface: NewPersonalCompute(apiClient), }, nil } -type RestrictWorkspaceAdminsPreviewClient struct { - RestrictWorkspaceAdminsPreviewInterface +type RestrictWorkspaceAdminsClient struct { + RestrictWorkspaceAdminsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewRestrictWorkspaceAdminsPreviewClient(cfg *config.Config) (*RestrictWorkspaceAdminsPreviewClient, error) { +func NewRestrictWorkspaceAdminsClient(cfg *config.Config) (*RestrictWorkspaceAdminsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -681,10 +651,10 @@ func NewRestrictWorkspaceAdminsPreviewClient(cfg *config.Config) (*RestrictWorks return nil, err } - return &RestrictWorkspaceAdminsPreviewClient{ - Config: cfg, - apiClient: apiClient, - RestrictWorkspaceAdminsPreviewInterface: NewRestrictWorkspaceAdminsPreview(databricksClient), + return &RestrictWorkspaceAdminsClient{ + Config: cfg, + apiClient: apiClient, + RestrictWorkspaceAdminsInterface: NewRestrictWorkspaceAdmins(databricksClient), }, nil } @@ -722,13 +692,13 @@ func NewSettingsClient(cfg *config.Config) (*SettingsClient, error) { }, nil } -type SettingsPreviewClient struct { - SettingsPreviewInterface +type TokenManagementClient struct { + TokenManagementInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewSettingsPreviewClient(cfg *config.Config) (*SettingsPreviewClient, error) { +func NewTokenManagementClient(cfg *config.Config) (*TokenManagementClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -749,20 +719,20 @@ func NewSettingsPreviewClient(cfg *config.Config) (*SettingsPreviewClient, error return nil, err } - return &SettingsPreviewClient{ + return &TokenManagementClient{ Config: cfg, apiClient: apiClient, - SettingsPreviewInterface: NewSettingsPreview(databricksClient), + TokenManagementInterface: NewTokenManagement(databricksClient), }, nil } -type TokenManagementPreviewClient struct { - TokenManagementPreviewInterface +type TokensClient struct { + TokensInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewTokenManagementPreviewClient(cfg *config.Config) (*TokenManagementPreviewClient, error) { +func NewTokensClient(cfg *config.Config) (*TokensClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -783,20 +753,20 @@ func NewTokenManagementPreviewClient(cfg *config.Config) (*TokenManagementPrevie return nil, err } - return &TokenManagementPreviewClient{ - Config: cfg, - apiClient: apiClient, - TokenManagementPreviewInterface: NewTokenManagementPreview(databricksClient), + return &TokensClient{ + Config: cfg, + apiClient: apiClient, + TokensInterface: NewTokens(databricksClient), }, nil } -type TokensPreviewClient struct { - TokensPreviewInterface +type 
WorkspaceConfClient struct { + WorkspaceConfInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewTokensPreviewClient(cfg *config.Config) (*TokensPreviewClient, error) { +func NewWorkspaceConfClient(cfg *config.Config) (*WorkspaceConfClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -817,43 +787,9 @@ func NewTokensPreviewClient(cfg *config.Config) (*TokensPreviewClient, error) { return nil, err } - return &TokensPreviewClient{ + return &WorkspaceConfClient{ Config: cfg, apiClient: apiClient, - TokensPreviewInterface: NewTokensPreview(databricksClient), - }, nil -} - -type WorkspaceConfPreviewClient struct { - WorkspaceConfPreviewInterface - Config *config.Config - apiClient *httpclient.ApiClient -} - -func NewWorkspaceConfPreviewClient(cfg *config.Config) (*WorkspaceConfPreviewClient, error) { - if cfg == nil { - cfg = &config.Config{} - } - - err := cfg.EnsureResolved() - if err != nil { - return nil, err - } - if cfg.IsAccountClient() { - return nil, errors.New("invalid configuration: please provide a valid workspace config for the requested workspace service client") - } - apiClient, err := cfg.NewApiClient() - if err != nil { - return nil, err - } - databricksClient, err := client.NewWithClient(cfg, apiClient) - if err != nil { - return nil, err - } - - return &WorkspaceConfPreviewClient{ - Config: cfg, - apiClient: apiClient, - WorkspaceConfPreviewInterface: NewWorkspaceConfPreview(databricksClient), + WorkspaceConfInterface: NewWorkspaceConf(databricksClient), }, nil } diff --git a/settings/v2preview/impl.go b/settings/v2preview/impl.go index 35d75cf7f..87e1a4c59 100755 --- a/settings/v2preview/impl.go +++ b/settings/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just AccountIpAccessListsPreview API methods -type accountIpAccessListsPreviewImpl struct { +// unexported type that holds implementations of just AccountIpAccessLists API methods +type accountIpAccessListsImpl struct { client *client.DatabricksClient } -func (a *accountIpAccessListsPreviewImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { +func (a *accountIpAccessListsImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { var createIpAccessListResponse CreateIpAccessListResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *accountIpAccessListsPreviewImpl) Create(ctx context.Context, request Cr return &createIpAccessListResponse, err } -func (a *accountIpAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteAccountIpAccessListRequest) error { +func (a *accountIpAccessListsImpl) Delete(ctx context.Context, request DeleteAccountIpAccessListRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *accountIpAccessListsPreviewImpl) Delete(ctx context.Context, request De return err } -func (a *accountIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAccountIpAccessListRequest) (*GetIpAccessListResponse, error) { +func (a *accountIpAccessListsImpl) Get(ctx context.Context, request GetAccountIpAccessListRequest) (*GetIpAccessListResponse, error) { var getIpAccessListResponse 
GetIpAccessListResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) @@ -51,7 +51,7 @@ func (a *accountIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAc // Get access lists. // // Gets all IP access lists for the specified account. -func (a *accountIpAccessListsPreviewImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { +func (a *accountIpAccessListsImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*GetIpAccessListsResponse, error) { @@ -73,11 +73,11 @@ func (a *accountIpAccessListsPreviewImpl) List(ctx context.Context) listing.Iter // Get access lists. // // Gets all IP access lists for the specified account. -func (a *accountIpAccessListsPreviewImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { +func (a *accountIpAccessListsImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { iterator := a.List(ctx) return listing.ToSlice[IpAccessListInfo](ctx, iterator) } -func (a *accountIpAccessListsPreviewImpl) internalList(ctx context.Context) (*GetIpAccessListsResponse, error) { +func (a *accountIpAccessListsImpl) internalList(ctx context.Context) (*GetIpAccessListsResponse, error) { var getIpAccessListsResponse GetIpAccessListsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) @@ -87,7 +87,7 @@ func (a *accountIpAccessListsPreviewImpl) internalList(ctx context.Context) (*Ge return &getIpAccessListsResponse, err } -func (a *accountIpAccessListsPreviewImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { +func (a *accountIpAccessListsImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) @@ -98,7 +98,7 @@ func (a *accountIpAccessListsPreviewImpl) Replace(ctx context.Context, request R return err } -func (a *accountIpAccessListsPreviewImpl) Update(ctx context.Context, request UpdateIpAccessList) error { +func (a *accountIpAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessList) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) @@ -114,17 +114,12 @@ type accountSettingsImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just AccountSettingsPreview API methods -type accountSettingsPreviewImpl struct { +// unexported type that holds implementations of just AibiDashboardEmbeddingAccessPolicy API methods +type aibiDashboardEmbeddingAccessPolicyImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just AibiDashboardEmbeddingAccessPolicyPreview API methods -type aibiDashboardEmbeddingAccessPolicyPreviewImpl struct { - client *client.DatabricksClient -} - -func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) (*DeleteAibiDashboardEmbeddingAccessPolicySettingResponse, error) { +func (a *aibiDashboardEmbeddingAccessPolicyImpl) Delete(ctx context.Context, request 
DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) (*DeleteAibiDashboardEmbeddingAccessPolicySettingResponse, error) { var deleteAibiDashboardEmbeddingAccessPolicySettingResponse DeleteAibiDashboardEmbeddingAccessPolicySettingResponse path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" queryParams := make(map[string]any) @@ -134,7 +129,7 @@ func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Delete(ctx context.Conte return &deleteAibiDashboardEmbeddingAccessPolicySettingResponse, err } -func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { +func (a *aibiDashboardEmbeddingAccessPolicyImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" queryParams := make(map[string]any) @@ -144,7 +139,7 @@ func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Get(ctx context.Context, return &aibiDashboardEmbeddingAccessPolicySetting, err } -func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { +func (a *aibiDashboardEmbeddingAccessPolicyImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_acc_policy/names/default" queryParams := make(map[string]any) @@ -155,12 +150,12 @@ func (a *aibiDashboardEmbeddingAccessPolicyPreviewImpl) Update(ctx context.Conte return &aibiDashboardEmbeddingAccessPolicySetting, err } -// unexported type that holds implementations of just AibiDashboardEmbeddingApprovedDomainsPreview API methods -type aibiDashboardEmbeddingApprovedDomainsPreviewImpl struct { +// unexported type that holds implementations of just AibiDashboardEmbeddingApprovedDomains API methods +type aibiDashboardEmbeddingApprovedDomainsImpl struct { client *client.DatabricksClient } -func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, error) { +func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, error) { var deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" queryParams := make(map[string]any) @@ -170,7 +165,7 @@ func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Delete(ctx context.Co return &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, err } -func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { +func (a *aibiDashboardEmbeddingApprovedDomainsImpl) 
Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" queryParams := make(map[string]any) @@ -180,7 +175,7 @@ func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Get(ctx context.Conte return &aibiDashboardEmbeddingApprovedDomainsSetting, err } -func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { +func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting path := "/api/2.0preview/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" queryParams := make(map[string]any) @@ -191,12 +186,12 @@ func (a *aibiDashboardEmbeddingApprovedDomainsPreviewImpl) Update(ctx context.Co return &aibiDashboardEmbeddingApprovedDomainsSetting, err } -// unexported type that holds implementations of just AutomaticClusterUpdatePreview API methods -type automaticClusterUpdatePreviewImpl struct { +// unexported type that holds implementations of just AutomaticClusterUpdate API methods +type automaticClusterUpdateImpl struct { client *client.DatabricksClient } -func (a *automaticClusterUpdatePreviewImpl) Get(ctx context.Context, request GetAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { +func (a *automaticClusterUpdateImpl) Get(ctx context.Context, request GetAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { var automaticClusterUpdateSetting AutomaticClusterUpdateSetting path := "/api/2.0preview/settings/types/automatic_cluster_update/names/default" queryParams := make(map[string]any) @@ -206,7 +201,7 @@ func (a *automaticClusterUpdatePreviewImpl) Get(ctx context.Context, request Get return &automaticClusterUpdateSetting, err } -func (a *automaticClusterUpdatePreviewImpl) Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { +func (a *automaticClusterUpdateImpl) Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { var automaticClusterUpdateSetting AutomaticClusterUpdateSetting path := "/api/2.0preview/settings/types/automatic_cluster_update/names/default" queryParams := make(map[string]any) @@ -217,12 +212,12 @@ func (a *automaticClusterUpdatePreviewImpl) Update(ctx context.Context, request return &automaticClusterUpdateSetting, err } -// unexported type that holds implementations of just ComplianceSecurityProfilePreview API methods -type complianceSecurityProfilePreviewImpl struct { +// unexported type that holds implementations of just ComplianceSecurityProfile API methods +type complianceSecurityProfileImpl struct { client *client.DatabricksClient } -func (a *complianceSecurityProfilePreviewImpl) Get(ctx context.Context, request GetComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { +func (a *complianceSecurityProfileImpl) Get(ctx context.Context, request GetComplianceSecurityProfileSettingRequest) 
(*ComplianceSecurityProfileSetting, error) { var complianceSecurityProfileSetting ComplianceSecurityProfileSetting path := "/api/2.0preview/settings/types/shield_csp_enablement_ws_db/names/default" queryParams := make(map[string]any) @@ -232,7 +227,7 @@ func (a *complianceSecurityProfilePreviewImpl) Get(ctx context.Context, request return &complianceSecurityProfileSetting, err } -func (a *complianceSecurityProfilePreviewImpl) Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { +func (a *complianceSecurityProfileImpl) Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { var complianceSecurityProfileSetting ComplianceSecurityProfileSetting path := "/api/2.0preview/settings/types/shield_csp_enablement_ws_db/names/default" queryParams := make(map[string]any) @@ -243,12 +238,12 @@ func (a *complianceSecurityProfilePreviewImpl) Update(ctx context.Context, reque return &complianceSecurityProfileSetting, err } -// unexported type that holds implementations of just CredentialsManagerPreview API methods -type credentialsManagerPreviewImpl struct { +// unexported type that holds implementations of just CredentialsManager API methods +type credentialsManagerImpl struct { client *client.DatabricksClient } -func (a *credentialsManagerPreviewImpl) ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) { +func (a *credentialsManagerImpl) ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) { var exchangeTokenResponse ExchangeTokenResponse path := "/api/2.0preview/credentials-manager/exchange-tokens/token" queryParams := make(map[string]any) @@ -259,12 +254,12 @@ func (a *credentialsManagerPreviewImpl) ExchangeToken(ctx context.Context, reque return &exchangeTokenResponse, err } -// unexported type that holds implementations of just CspEnablementAccountPreview API methods -type cspEnablementAccountPreviewImpl struct { +// unexported type that holds implementations of just CspEnablementAccount API methods +type cspEnablementAccountImpl struct { client *client.DatabricksClient } -func (a *cspEnablementAccountPreviewImpl) Get(ctx context.Context, request GetCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { +func (a *cspEnablementAccountImpl) Get(ctx context.Context, request GetCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { var cspEnablementAccountSetting CspEnablementAccountSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -274,7 +269,7 @@ func (a *cspEnablementAccountPreviewImpl) Get(ctx context.Context, request GetCs return &cspEnablementAccountSetting, err } -func (a *cspEnablementAccountPreviewImpl) Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { +func (a *cspEnablementAccountImpl) Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { var cspEnablementAccountSetting CspEnablementAccountSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -285,12 +280,12 @@ func (a *cspEnablementAccountPreviewImpl) Update(ctx context.Context, 
request Up return &cspEnablementAccountSetting, err } -// unexported type that holds implementations of just DefaultNamespacePreview API methods -type defaultNamespacePreviewImpl struct { +// unexported type that holds implementations of just DefaultNamespace API methods +type defaultNamespaceImpl struct { client *client.DatabricksClient } -func (a *defaultNamespacePreviewImpl) Delete(ctx context.Context, request DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error) { +func (a *defaultNamespaceImpl) Delete(ctx context.Context, request DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error) { var deleteDefaultNamespaceSettingResponse DeleteDefaultNamespaceSettingResponse path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" queryParams := make(map[string]any) @@ -300,7 +295,7 @@ func (a *defaultNamespacePreviewImpl) Delete(ctx context.Context, request Delete return &deleteDefaultNamespaceSettingResponse, err } -func (a *defaultNamespacePreviewImpl) Get(ctx context.Context, request GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { +func (a *defaultNamespaceImpl) Get(ctx context.Context, request GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { var defaultNamespaceSetting DefaultNamespaceSetting path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" queryParams := make(map[string]any) @@ -310,7 +305,7 @@ func (a *defaultNamespacePreviewImpl) Get(ctx context.Context, request GetDefaul return &defaultNamespaceSetting, err } -func (a *defaultNamespacePreviewImpl) Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { +func (a *defaultNamespaceImpl) Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { var defaultNamespaceSetting DefaultNamespaceSetting path := "/api/2.0preview/settings/types/default_namespace_ws/names/default" queryParams := make(map[string]any) @@ -321,12 +316,12 @@ func (a *defaultNamespacePreviewImpl) Update(ctx context.Context, request Update return &defaultNamespaceSetting, err } -// unexported type that holds implementations of just DisableLegacyAccessPreview API methods -type disableLegacyAccessPreviewImpl struct { +// unexported type that holds implementations of just DisableLegacyAccess API methods +type disableLegacyAccessImpl struct { client *client.DatabricksClient } -func (a *disableLegacyAccessPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) { +func (a *disableLegacyAccessImpl) Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) { var deleteDisableLegacyAccessResponse DeleteDisableLegacyAccessResponse path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" queryParams := make(map[string]any) @@ -336,7 +331,7 @@ func (a *disableLegacyAccessPreviewImpl) Delete(ctx context.Context, request Del return &deleteDisableLegacyAccessResponse, err } -func (a *disableLegacyAccessPreviewImpl) Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { +func (a *disableLegacyAccessImpl) Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { var disableLegacyAccess DisableLegacyAccess path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" queryParams := 
make(map[string]any) @@ -346,7 +341,7 @@ func (a *disableLegacyAccessPreviewImpl) Get(ctx context.Context, request GetDis return &disableLegacyAccess, err } -func (a *disableLegacyAccessPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { +func (a *disableLegacyAccessImpl) Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { var disableLegacyAccess DisableLegacyAccess path := "/api/2.0preview/settings/types/disable_legacy_access/names/default" queryParams := make(map[string]any) @@ -357,12 +352,12 @@ func (a *disableLegacyAccessPreviewImpl) Update(ctx context.Context, request Upd return &disableLegacyAccess, err } -// unexported type that holds implementations of just DisableLegacyDbfsPreview API methods -type disableLegacyDbfsPreviewImpl struct { +// unexported type that holds implementations of just DisableLegacyDbfs API methods +type disableLegacyDbfsImpl struct { client *client.DatabricksClient } -func (a *disableLegacyDbfsPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyDbfsRequest) (*DeleteDisableLegacyDbfsResponse, error) { +func (a *disableLegacyDbfsImpl) Delete(ctx context.Context, request DeleteDisableLegacyDbfsRequest) (*DeleteDisableLegacyDbfsResponse, error) { var deleteDisableLegacyDbfsResponse DeleteDisableLegacyDbfsResponse path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" queryParams := make(map[string]any) @@ -372,7 +367,7 @@ func (a *disableLegacyDbfsPreviewImpl) Delete(ctx context.Context, request Delet return &deleteDisableLegacyDbfsResponse, err } -func (a *disableLegacyDbfsPreviewImpl) Get(ctx context.Context, request GetDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { +func (a *disableLegacyDbfsImpl) Get(ctx context.Context, request GetDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { var disableLegacyDbfs DisableLegacyDbfs path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" queryParams := make(map[string]any) @@ -382,7 +377,7 @@ func (a *disableLegacyDbfsPreviewImpl) Get(ctx context.Context, request GetDisab return &disableLegacyDbfs, err } -func (a *disableLegacyDbfsPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { +func (a *disableLegacyDbfsImpl) Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { var disableLegacyDbfs DisableLegacyDbfs path := "/api/2.0preview/settings/types/disable_legacy_dbfs/names/default" queryParams := make(map[string]any) @@ -393,12 +388,12 @@ func (a *disableLegacyDbfsPreviewImpl) Update(ctx context.Context, request Updat return &disableLegacyDbfs, err } -// unexported type that holds implementations of just DisableLegacyFeaturesPreview API methods -type disableLegacyFeaturesPreviewImpl struct { +// unexported type that holds implementations of just DisableLegacyFeatures API methods +type disableLegacyFeaturesImpl struct { client *client.DatabricksClient } -func (a *disableLegacyFeaturesPreviewImpl) Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) { +func (a *disableLegacyFeaturesImpl) Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) { var deleteDisableLegacyFeaturesResponse DeleteDisableLegacyFeaturesResponse path := 
fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -408,7 +403,7 @@ func (a *disableLegacyFeaturesPreviewImpl) Delete(ctx context.Context, request D return &deleteDisableLegacyFeaturesResponse, err } -func (a *disableLegacyFeaturesPreviewImpl) Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { +func (a *disableLegacyFeaturesImpl) Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { var disableLegacyFeatures DisableLegacyFeatures path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -418,7 +413,7 @@ func (a *disableLegacyFeaturesPreviewImpl) Get(ctx context.Context, request GetD return &disableLegacyFeatures, err } -func (a *disableLegacyFeaturesPreviewImpl) Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { +func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { var disableLegacyFeatures DisableLegacyFeatures path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -429,12 +424,12 @@ func (a *disableLegacyFeaturesPreviewImpl) Update(ctx context.Context, request U return &disableLegacyFeatures, err } -// unexported type that holds implementations of just EnableIpAccessListsPreview API methods -type enableIpAccessListsPreviewImpl struct { +// unexported type that holds implementations of just EnableIpAccessLists API methods +type enableIpAccessListsImpl struct { client *client.DatabricksClient } -func (a *enableIpAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) { +func (a *enableIpAccessListsImpl) Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) { var deleteAccountIpAccessEnableResponse DeleteAccountIpAccessEnableResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -444,7 +439,7 @@ func (a *enableIpAccessListsPreviewImpl) Delete(ctx context.Context, request Del return &deleteAccountIpAccessEnableResponse, err } -func (a *enableIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { +func (a *enableIpAccessListsImpl) Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { var accountIpAccessEnable AccountIpAccessEnable path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -454,7 +449,7 @@ func (a *enableIpAccessListsPreviewImpl) Get(ctx context.Context, request GetAcc return &accountIpAccessEnable, err } -func (a *enableIpAccessListsPreviewImpl) Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { +func (a *enableIpAccessListsImpl) Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { var 
accountIpAccessEnable AccountIpAccessEnable path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -465,12 +460,12 @@ func (a *enableIpAccessListsPreviewImpl) Update(ctx context.Context, request Upd return &accountIpAccessEnable, err } -// unexported type that holds implementations of just EnhancedSecurityMonitoringPreview API methods -type enhancedSecurityMonitoringPreviewImpl struct { +// unexported type that holds implementations of just EnhancedSecurityMonitoring API methods +type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient } -func (a *enhancedSecurityMonitoringPreviewImpl) Get(ctx context.Context, request GetEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { +func (a *enhancedSecurityMonitoringImpl) Get(ctx context.Context, request GetEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting path := "/api/2.0preview/settings/types/shield_esm_enablement_ws_db/names/default" queryParams := make(map[string]any) @@ -480,7 +475,7 @@ func (a *enhancedSecurityMonitoringPreviewImpl) Get(ctx context.Context, request return &enhancedSecurityMonitoringSetting, err } -func (a *enhancedSecurityMonitoringPreviewImpl) Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { +func (a *enhancedSecurityMonitoringImpl) Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting path := "/api/2.0preview/settings/types/shield_esm_enablement_ws_db/names/default" queryParams := make(map[string]any) @@ -491,12 +486,12 @@ func (a *enhancedSecurityMonitoringPreviewImpl) Update(ctx context.Context, requ return &enhancedSecurityMonitoringSetting, err } -// unexported type that holds implementations of just EsmEnablementAccountPreview API methods -type esmEnablementAccountPreviewImpl struct { +// unexported type that holds implementations of just EsmEnablementAccount API methods +type esmEnablementAccountImpl struct { client *client.DatabricksClient } -func (a *esmEnablementAccountPreviewImpl) Get(ctx context.Context, request GetEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { +func (a *esmEnablementAccountImpl) Get(ctx context.Context, request GetEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { var esmEnablementAccountSetting EsmEnablementAccountSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -506,7 +501,7 @@ func (a *esmEnablementAccountPreviewImpl) Get(ctx context.Context, request GetEs return &esmEnablementAccountSetting, err } -func (a *esmEnablementAccountPreviewImpl) Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { +func (a *esmEnablementAccountImpl) Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { var esmEnablementAccountSetting EsmEnablementAccountSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) 
queryParams := make(map[string]any) @@ -517,12 +512,12 @@ func (a *esmEnablementAccountPreviewImpl) Update(ctx context.Context, request Up return &esmEnablementAccountSetting, err } -// unexported type that holds implementations of just IpAccessListsPreview API methods -type ipAccessListsPreviewImpl struct { +// unexported type that holds implementations of just IpAccessLists API methods +type ipAccessListsImpl struct { client *client.DatabricksClient } -func (a *ipAccessListsPreviewImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { +func (a *ipAccessListsImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { var createIpAccessListResponse CreateIpAccessListResponse path := "/api/2.0preview/ip-access-lists" queryParams := make(map[string]any) @@ -533,7 +528,7 @@ func (a *ipAccessListsPreviewImpl) Create(ctx context.Context, request CreateIpA return &createIpAccessListResponse, err } -func (a *ipAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteIpAccessListRequest) error { +func (a *ipAccessListsImpl) Delete(ctx context.Context, request DeleteIpAccessListRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) @@ -543,7 +538,7 @@ func (a *ipAccessListsPreviewImpl) Delete(ctx context.Context, request DeleteIpA return err } -func (a *ipAccessListsPreviewImpl) Get(ctx context.Context, request GetIpAccessListRequest) (*FetchIpAccessListResponse, error) { +func (a *ipAccessListsImpl) Get(ctx context.Context, request GetIpAccessListRequest) (*FetchIpAccessListResponse, error) { var fetchIpAccessListResponse FetchIpAccessListResponse path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) @@ -556,7 +551,7 @@ func (a *ipAccessListsPreviewImpl) Get(ctx context.Context, request GetIpAccessL // Get access lists. // // Gets all IP access lists for the specified workspace. -func (a *ipAccessListsPreviewImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { +func (a *ipAccessListsImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListIpAccessListResponse, error) { @@ -578,11 +573,11 @@ func (a *ipAccessListsPreviewImpl) List(ctx context.Context) listing.Iterator[Ip // Get access lists. // // Gets all IP access lists for the specified workspace. 
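For reference, the `List`/`ListAll` pair being renamed here follows the SDK-wide pagination convention: `List` returns a lazy `listing.Iterator`, while `ListAll` drains it into a slice. A minimal sketch of consuming the iterator directly, assuming `settings/v2preview` exposes an `IpAccessListsInterface` and a `NewIpAccessListsClient` constructor analogous to the sharing ones later in this patch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview" // assumed import path
)

// printAccessLists walks the iterator page by page instead of loading
// every IP access list into memory at once with ListAll.
func printAccessLists(ctx context.Context, api settingspreview.IpAccessListsInterface) error {
	it := api.List(ctx)
	for it.HasNext(ctx) {
		info, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("%s enabled=%v\n", info.Label, info.Enabled)
	}
	return nil
}

func main() {
	// Assumed constructor; a nil config resolves auth from the environment.
	api, err := settingspreview.NewIpAccessListsClient(nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := printAccessLists(context.Background(), api); err != nil {
		log.Fatal(err)
	}
}
```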
-func (a *ipAccessListsPreviewImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { +func (a *ipAccessListsImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { iterator := a.List(ctx) return listing.ToSlice[IpAccessListInfo](ctx, iterator) } -func (a *ipAccessListsPreviewImpl) internalList(ctx context.Context) (*ListIpAccessListResponse, error) { +func (a *ipAccessListsImpl) internalList(ctx context.Context) (*ListIpAccessListResponse, error) { var listIpAccessListResponse ListIpAccessListResponse path := "/api/2.0preview/ip-access-lists" @@ -592,7 +587,7 @@ func (a *ipAccessListsPreviewImpl) internalList(ctx context.Context) (*ListIpAcc return &listIpAccessListResponse, err } -func (a *ipAccessListsPreviewImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { +func (a *ipAccessListsImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) @@ -603,7 +598,7 @@ func (a *ipAccessListsPreviewImpl) Replace(ctx context.Context, request ReplaceI return err } -func (a *ipAccessListsPreviewImpl) Update(ctx context.Context, request UpdateIpAccessList) error { +func (a *ipAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessList) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0preview/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) @@ -614,12 +609,12 @@ func (a *ipAccessListsPreviewImpl) Update(ctx context.Context, request UpdateIpA return err } -// unexported type that holds implementations of just NetworkConnectivityPreview API methods -type networkConnectivityPreviewImpl struct { +// unexported type that holds implementations of just NetworkConnectivity API methods +type networkConnectivityImpl struct { client *client.DatabricksClient } -func (a *networkConnectivityPreviewImpl) CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) { +func (a *networkConnectivityImpl) CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) { var networkConnectivityConfiguration NetworkConnectivityConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -630,7 +625,7 @@ func (a *networkConnectivityPreviewImpl) CreateNetworkConnectivityConfiguration( return &networkConnectivityConfiguration, err } -func (a *networkConnectivityPreviewImpl) CreatePrivateEndpointRule(ctx context.Context, request CreatePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { +func (a *networkConnectivityImpl) CreatePrivateEndpointRule(ctx context.Context, request CreatePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) queryParams := make(map[string]any) @@ -641,7 +636,7 @@ func (a *networkConnectivityPreviewImpl) CreatePrivateEndpointRule(ctx context.C return &nccAzurePrivateEndpointRule, err } -func (a *networkConnectivityPreviewImpl) DeleteNetworkConnectivityConfiguration(ctx context.Context, request 
DeleteNetworkConnectivityConfigurationRequest) error { +func (a *networkConnectivityImpl) DeleteNetworkConnectivityConfiguration(ctx context.Context, request DeleteNetworkConnectivityConfigurationRequest) error { var deleteNetworkConnectivityConfigurationResponse DeleteNetworkConnectivityConfigurationResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) queryParams := make(map[string]any) @@ -651,7 +646,7 @@ func (a *networkConnectivityPreviewImpl) DeleteNetworkConnectivityConfiguration( return err } -func (a *networkConnectivityPreviewImpl) DeletePrivateEndpointRule(ctx context.Context, request DeletePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { +func (a *networkConnectivityImpl) DeletePrivateEndpointRule(ctx context.Context, request DeletePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) queryParams := make(map[string]any) @@ -661,7 +656,7 @@ func (a *networkConnectivityPreviewImpl) DeletePrivateEndpointRule(ctx context.C return &nccAzurePrivateEndpointRule, err } -func (a *networkConnectivityPreviewImpl) GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) { +func (a *networkConnectivityImpl) GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) { var networkConnectivityConfiguration NetworkConnectivityConfiguration path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) queryParams := make(map[string]any) @@ -671,7 +666,7 @@ func (a *networkConnectivityPreviewImpl) GetNetworkConnectivityConfiguration(ctx return &networkConnectivityConfiguration, err } -func (a *networkConnectivityPreviewImpl) GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { +func (a *networkConnectivityImpl) GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) queryParams := make(map[string]any) @@ -684,7 +679,7 @@ func (a *networkConnectivityPreviewImpl) GetPrivateEndpointRule(ctx context.Cont // List network connectivity configurations. // // Gets an array of network connectivity configurations. 
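The network-connectivity methods below follow the same iterator/slice pairing; a hedged sketch of the slice-returning convenience, assuming a `NetworkConnectivityInterface` is exported for this impl:

```go
package main

import (
	"context"

	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview" // assumed import path
)

// nccNames collects the names of every network connectivity configuration;
// the generated ...All wrapper handles pagination internally.
func nccNames(ctx context.Context, api settingspreview.NetworkConnectivityInterface) ([]string, error) {
	configs, err := api.ListNetworkConnectivityConfigurationsAll(ctx,
		settingspreview.ListNetworkConnectivityConfigurationsRequest{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(configs))
	for _, c := range configs {
		names = append(names, c.Name)
	}
	return names, nil
}
```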
-func (a *networkConnectivityPreviewImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] { +func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] { getNextPage := func(ctx context.Context, req ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -711,11 +706,11 @@ func (a *networkConnectivityPreviewImpl) ListNetworkConnectivityConfigurations(c // List network connectivity configurations. // // Gets an array of network connectivity configurations. -func (a *networkConnectivityPreviewImpl) ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) { +func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) { iterator := a.ListNetworkConnectivityConfigurations(ctx, request) return listing.ToSlice[NetworkConnectivityConfiguration](ctx, iterator) } -func (a *networkConnectivityPreviewImpl) internalListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { +func (a *networkConnectivityImpl) internalListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { var listNetworkConnectivityConfigurationsResponse ListNetworkConnectivityConfigurationsResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -728,7 +723,7 @@ func (a *networkConnectivityPreviewImpl) internalListNetworkConnectivityConfigur // List private endpoint rules. // // Gets an array of private endpoint rules. -func (a *networkConnectivityPreviewImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] { +func (a *networkConnectivityImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] { getNextPage := func(ctx context.Context, req ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -755,11 +750,11 @@ func (a *networkConnectivityPreviewImpl) ListPrivateEndpointRules(ctx context.Co // List private endpoint rules. // // Gets an array of private endpoint rules. 
-func (a *networkConnectivityPreviewImpl) ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) { +func (a *networkConnectivityImpl) ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) { iterator := a.ListPrivateEndpointRules(ctx, request) return listing.ToSlice[NccAzurePrivateEndpointRule](ctx, iterator) } -func (a *networkConnectivityPreviewImpl) internalListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { +func (a *networkConnectivityImpl) internalListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { var listNccAzurePrivateEndpointRulesResponse ListNccAzurePrivateEndpointRulesResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) queryParams := make(map[string]any) @@ -769,12 +764,12 @@ func (a *networkConnectivityPreviewImpl) internalListPrivateEndpointRules(ctx co return &listNccAzurePrivateEndpointRulesResponse, err } -// unexported type that holds implementations of just NotificationDestinationsPreview API methods -type notificationDestinationsPreviewImpl struct { +// unexported type that holds implementations of just NotificationDestinations API methods +type notificationDestinationsImpl struct { client *client.DatabricksClient } -func (a *notificationDestinationsPreviewImpl) Create(ctx context.Context, request CreateNotificationDestinationRequest) (*NotificationDestination, error) { +func (a *notificationDestinationsImpl) Create(ctx context.Context, request CreateNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := "/api/2.0preview/notification-destinations" queryParams := make(map[string]any) @@ -785,7 +780,7 @@ func (a *notificationDestinationsPreviewImpl) Create(ctx context.Context, reques return &notificationDestination, err } -func (a *notificationDestinationsPreviewImpl) Delete(ctx context.Context, request DeleteNotificationDestinationRequest) error { +func (a *notificationDestinationsImpl) Delete(ctx context.Context, request DeleteNotificationDestinationRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) queryParams := make(map[string]any) @@ -795,7 +790,7 @@ func (a *notificationDestinationsPreviewImpl) Delete(ctx context.Context, reques return err } -func (a *notificationDestinationsPreviewImpl) Get(ctx context.Context, request GetNotificationDestinationRequest) (*NotificationDestination, error) { +func (a *notificationDestinationsImpl) Get(ctx context.Context, request GetNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) queryParams := make(map[string]any) @@ -808,7 +803,7 @@ func (a *notificationDestinationsPreviewImpl) Get(ctx context.Context, request G return &notificationDestination, err } // List notification destinations. // // Lists notification destinations.
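A hedged sketch of a create-then-clean-up round trip against the notification-destinations API, assuming a `NewNotificationDestinationsClient` constructor exists and that the request/`Config` shapes match the public v2 settings package:

```go
package main

import (
	"context"
	"log"

	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview" // assumed import path
)

func main() {
	ctx := context.Background()
	api, err := settingspreview.NewNotificationDestinationsClient(nil) // assumed constructor; nil = env auth
	if err != nil {
		log.Fatal(err)
	}
	// Create an email destination, then delete it again on exit.
	dest, err := api.Create(ctx, settingspreview.CreateNotificationDestinationRequest{
		DisplayName: "oncall-email", // hypothetical name
		Config: &settingspreview.Config{
			Email: &settingspreview.EmailConfig{Addresses: []string{"oncall@example.com"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer api.Delete(ctx, settingspreview.DeleteNotificationDestinationRequest{Id: dest.Id})
	log.Printf("created destination %s", dest.Id)
}
```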
-func (a *notificationDestinationsPreviewImpl) List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] { +func (a *notificationDestinationsImpl) List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] { getNextPage := func(ctx context.Context, req ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -835,11 +830,11 @@ func (a *notificationDestinationsPreviewImpl) List(ctx context.Context, request // List notification destinations. // // Lists notification destinations. -func (a *notificationDestinationsPreviewImpl) ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) { +func (a *notificationDestinationsImpl) ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) { iterator := a.List(ctx, request) return listing.ToSlice[ListNotificationDestinationsResult](ctx, iterator) } -func (a *notificationDestinationsPreviewImpl) internalList(ctx context.Context, request ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { +func (a *notificationDestinationsImpl) internalList(ctx context.Context, request ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { var listNotificationDestinationsResponse ListNotificationDestinationsResponse path := "/api/2.0preview/notification-destinations" queryParams := make(map[string]any) @@ -849,7 +844,7 @@ func (a *notificationDestinationsPreviewImpl) internalList(ctx context.Context, return &listNotificationDestinationsResponse, err } -func (a *notificationDestinationsPreviewImpl) Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) { +func (a *notificationDestinationsImpl) Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := fmt.Sprintf("/api/2.0preview/notification-destinations/%v", request.Id) queryParams := make(map[string]any) @@ -860,12 +855,12 @@ func (a *notificationDestinationsPreviewImpl) Update(ctx context.Context, reques return &notificationDestination, err } -// unexported type that holds implementations of just PersonalComputePreview API methods -type personalComputePreviewImpl struct { +// unexported type that holds implementations of just PersonalCompute API methods +type personalComputeImpl struct { client *client.DatabricksClient } -func (a *personalComputePreviewImpl) Delete(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) { +func (a *personalComputeImpl) Delete(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) { var deletePersonalComputeSettingResponse DeletePersonalComputeSettingResponse path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -875,7 +870,7 @@ func (a *personalComputePreviewImpl) Delete(ctx context.Context, request DeleteP return &deletePersonalComputeSettingResponse, err } -func (a *personalComputePreviewImpl) Get(ctx context.Context, request GetPersonalComputeSettingRequest) (*PersonalComputeSetting, error) {
+func (a *personalComputeImpl) Get(ctx context.Context, request GetPersonalComputeSettingRequest) (*PersonalComputeSetting, error) { var personalComputeSetting PersonalComputeSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -885,7 +880,7 @@ func (a *personalComputePreviewImpl) Get(ctx context.Context, request GetPersona return &personalComputeSetting, err } -func (a *personalComputePreviewImpl) Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) { +func (a *personalComputeImpl) Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) { var personalComputeSetting PersonalComputeSetting path := fmt.Sprintf("/api/2.0preview/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -896,12 +891,12 @@ func (a *personalComputePreviewImpl) Update(ctx context.Context, request UpdateP return &personalComputeSetting, err } -// unexported type that holds implementations of just RestrictWorkspaceAdminsPreview API methods -type restrictWorkspaceAdminsPreviewImpl struct { +// unexported type that holds implementations of just RestrictWorkspaceAdmins API methods +type restrictWorkspaceAdminsImpl struct { client *client.DatabricksClient } -func (a *restrictWorkspaceAdminsPreviewImpl) Delete(ctx context.Context, request DeleteRestrictWorkspaceAdminsSettingRequest) (*DeleteRestrictWorkspaceAdminsSettingResponse, error) { +func (a *restrictWorkspaceAdminsImpl) Delete(ctx context.Context, request DeleteRestrictWorkspaceAdminsSettingRequest) (*DeleteRestrictWorkspaceAdminsSettingResponse, error) { var deleteRestrictWorkspaceAdminsSettingResponse DeleteRestrictWorkspaceAdminsSettingResponse path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" queryParams := make(map[string]any) @@ -911,7 +906,7 @@ func (a *restrictWorkspaceAdminsPreviewImpl) Delete(ctx context.Context, request return &deleteRestrictWorkspaceAdminsSettingResponse, err } -func (a *restrictWorkspaceAdminsPreviewImpl) Get(ctx context.Context, request GetRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { +func (a *restrictWorkspaceAdminsImpl) Get(ctx context.Context, request GetRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" queryParams := make(map[string]any) @@ -921,7 +916,7 @@ func (a *restrictWorkspaceAdminsPreviewImpl) Get(ctx context.Context, request Ge return &restrictWorkspaceAdminsSetting, err } -func (a *restrictWorkspaceAdminsPreviewImpl) Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { +func (a *restrictWorkspaceAdminsImpl) Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting path := "/api/2.0preview/settings/types/restrict_workspace_admins/names/default" queryParams := make(map[string]any) @@ -937,17 +932,12 @@ type settingsImpl struct { client *client.DatabricksClient } -// unexported type that holds implementations of just SettingsPreview API methods -type settingsPreviewImpl struct { - client 
*client.DatabricksClient -} - -// unexported type that holds implementations of just TokenManagementPreview API methods -type tokenManagementPreviewImpl struct { +// unexported type that holds implementations of just TokenManagement API methods +type tokenManagementImpl struct { client *client.DatabricksClient } -func (a *tokenManagementPreviewImpl) CreateOboToken(ctx context.Context, request CreateOboTokenRequest) (*CreateOboTokenResponse, error) { +func (a *tokenManagementImpl) CreateOboToken(ctx context.Context, request CreateOboTokenRequest) (*CreateOboTokenResponse, error) { var createOboTokenResponse CreateOboTokenResponse path := "/api/2.0preview/token-management/on-behalf-of/tokens" queryParams := make(map[string]any) @@ -958,7 +948,7 @@ func (a *tokenManagementPreviewImpl) CreateOboToken(ctx context.Context, request return &createOboTokenResponse, err } -func (a *tokenManagementPreviewImpl) Delete(ctx context.Context, request DeleteTokenManagementRequest) error { +func (a *tokenManagementImpl) Delete(ctx context.Context, request DeleteTokenManagementRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/token-management/tokens/%v", request.TokenId) queryParams := make(map[string]any) @@ -968,7 +958,7 @@ func (a *tokenManagementPreviewImpl) Delete(ctx context.Context, request DeleteT return err } -func (a *tokenManagementPreviewImpl) Get(ctx context.Context, request GetTokenManagementRequest) (*GetTokenResponse, error) { +func (a *tokenManagementImpl) Get(ctx context.Context, request GetTokenManagementRequest) (*GetTokenResponse, error) { var getTokenResponse GetTokenResponse path := fmt.Sprintf("/api/2.0preview/token-management/tokens/%v", request.TokenId) queryParams := make(map[string]any) @@ -978,7 +968,7 @@ func (a *tokenManagementPreviewImpl) Get(ctx context.Context, request GetTokenMa return &getTokenResponse, err } -func (a *tokenManagementPreviewImpl) GetPermissionLevels(ctx context.Context) (*GetTokenPermissionLevelsResponse, error) { +func (a *tokenManagementImpl) GetPermissionLevels(ctx context.Context) (*GetTokenPermissionLevelsResponse, error) { var getTokenPermissionLevelsResponse GetTokenPermissionLevelsResponse path := "/api/2.0preview/permissions/authorization/tokens/permissionLevels" @@ -988,7 +978,7 @@ func (a *tokenManagementPreviewImpl) GetPermissionLevels(ctx context.Context) (* return &getTokenPermissionLevelsResponse, err } -func (a *tokenManagementPreviewImpl) GetPermissions(ctx context.Context) (*TokenPermissions, error) { +func (a *tokenManagementImpl) GetPermissions(ctx context.Context) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0preview/permissions/authorization/tokens" @@ -1001,7 +991,7 @@ func (a *tokenManagementPreviewImpl) GetPermissions(ctx context.Context) (*Token // List all tokens. // // Lists all tokens associated with the specified workspace or user. -func (a *tokenManagementPreviewImpl) List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] { +func (a *tokenManagementImpl) List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] { getNextPage := func(ctx context.Context, req ListTokenManagementRequest) (*ListTokensResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -1022,11 +1012,11 @@ func (a *tokenManagementPreviewImpl) List(ctx context.Context, request ListToken // List all tokens. // // Lists all tokens associated with the specified workspace or user. 
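The token-management methods above are the workspace-admin surface; `CreateOboToken` mints tokens on behalf of a service principal. A minimal sketch, assuming a `TokenManagementInterface` is exported for this impl:

```go
package main

import (
	"context"

	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview" // assumed import path
)

// createOboToken mints a short-lived token on behalf of a service
// principal; the application ID below is a placeholder.
func createOboToken(ctx context.Context, api settingspreview.TokenManagementInterface) (string, error) {
	resp, err := api.CreateOboToken(ctx, settingspreview.CreateOboTokenRequest{
		ApplicationId:   "00000000-0000-0000-0000-000000000000", // hypothetical service principal
		LifetimeSeconds: 3600,
		Comment:         "short-lived automation token",
	})
	if err != nil {
		return "", err
	}
	return resp.TokenValue, nil
}
```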
-func (a *tokenManagementPreviewImpl) ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) { +func (a *tokenManagementImpl) ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[TokenInfo](ctx, iterator) } -func (a *tokenManagementPreviewImpl) internalList(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { +func (a *tokenManagementImpl) internalList(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { var listTokensResponse ListTokensResponse path := "/api/2.0preview/token-management/tokens" queryParams := make(map[string]any) @@ -1036,7 +1026,7 @@ func (a *tokenManagementPreviewImpl) internalList(ctx context.Context, request L return &listTokensResponse, err } -func (a *tokenManagementPreviewImpl) SetPermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { +func (a *tokenManagementImpl) SetPermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0preview/permissions/authorization/tokens" queryParams := make(map[string]any) @@ -1047,7 +1037,7 @@ func (a *tokenManagementPreviewImpl) SetPermissions(ctx context.Context, request return &tokenPermissions, err } -func (a *tokenManagementPreviewImpl) UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { +func (a *tokenManagementImpl) UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0preview/permissions/authorization/tokens" queryParams := make(map[string]any) @@ -1058,12 +1048,12 @@ func (a *tokenManagementPreviewImpl) UpdatePermissions(ctx context.Context, requ return &tokenPermissions, err } -// unexported type that holds implementations of just TokensPreview API methods -type tokensPreviewImpl struct { +// unexported type that holds implementations of just Tokens API methods +type tokensImpl struct { client *client.DatabricksClient } -func (a *tokensPreviewImpl) Create(ctx context.Context, request CreateTokenRequest) (*CreateTokenResponse, error) { +func (a *tokensImpl) Create(ctx context.Context, request CreateTokenRequest) (*CreateTokenResponse, error) { var createTokenResponse CreateTokenResponse path := "/api/2.0preview/token/create" queryParams := make(map[string]any) @@ -1074,7 +1064,7 @@ func (a *tokensPreviewImpl) Create(ctx context.Context, request CreateTokenReque return &createTokenResponse, err } -func (a *tokensPreviewImpl) Delete(ctx context.Context, request RevokeTokenRequest) error { +func (a *tokensImpl) Delete(ctx context.Context, request RevokeTokenRequest) error { var revokeTokenResponse RevokeTokenResponse path := "/api/2.0preview/token/delete" queryParams := make(map[string]any) @@ -1088,7 +1078,7 @@ func (a *tokensPreviewImpl) Delete(ctx context.Context, request RevokeTokenReque // List tokens. // // Lists all the valid tokens for a user-workspace pair. 
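By contrast, the Tokens API below is the per-user surface for personal access tokens. A hedged sketch, assuming a `TokensInterface` mirrors the impl:

```go
package main

import (
	"context"

	settingspreview "github.com/databricks/databricks-sdk-go/settings/v2preview" // assumed import path
)

// newPat creates a personal access token for the calling user, as
// opposed to the admin-level token-management API above.
func newPat(ctx context.Context, api settingspreview.TokensInterface) (string, error) {
	resp, err := api.Create(ctx, settingspreview.CreateTokenRequest{
		Comment:         "ci token",
		LifetimeSeconds: 86400,
	})
	if err != nil {
		return "", err
	}
	return resp.TokenValue, nil
}
```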
-func (a *tokensPreviewImpl) List(ctx context.Context) listing.Iterator[PublicTokenInfo] { +func (a *tokensImpl) List(ctx context.Context) listing.Iterator[PublicTokenInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListPublicTokensResponse, error) { @@ -1110,11 +1100,11 @@ func (a *tokensPreviewImpl) List(ctx context.Context) listing.Iterator[PublicTok // List tokens. // // Lists all the valid tokens for a user-workspace pair. -func (a *tokensPreviewImpl) ListAll(ctx context.Context) ([]PublicTokenInfo, error) { +func (a *tokensImpl) ListAll(ctx context.Context) ([]PublicTokenInfo, error) { iterator := a.List(ctx) return listing.ToSlice[PublicTokenInfo](ctx, iterator) } -func (a *tokensPreviewImpl) internalList(ctx context.Context) (*ListPublicTokensResponse, error) { +func (a *tokensImpl) internalList(ctx context.Context) (*ListPublicTokensResponse, error) { var listPublicTokensResponse ListPublicTokensResponse path := "/api/2.0preview/token/list" @@ -1124,12 +1114,12 @@ func (a *tokensPreviewImpl) internalList(ctx context.Context) (*ListPublicTokens return &listPublicTokensResponse, err } -// unexported type that holds implementations of just WorkspaceConfPreview API methods -type workspaceConfPreviewImpl struct { +// unexported type that holds implementations of just WorkspaceConf API methods +type workspaceConfImpl struct { client *client.DatabricksClient } -func (a *workspaceConfPreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*map[string]string, error) { +func (a *workspaceConfImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*map[string]string, error) { var workspaceConf map[string]string path := "/api/2.0preview/workspace-conf" queryParams := make(map[string]any) @@ -1139,7 +1129,7 @@ func (a *workspaceConfPreviewImpl) GetStatus(ctx context.Context, request GetSta return &workspaceConf, err } -func (a *workspaceConfPreviewImpl) SetStatus(ctx context.Context, request WorkspaceConf) error { +func (a *workspaceConfImpl) SetStatus(ctx context.Context, request WorkspaceConf) error { var setStatusResponse SetStatusResponse path := "/api/2.0preview/workspace-conf" queryParams := make(map[string]any) diff --git a/sharing/v2/model.go b/sharing/v2/model.go index b0d2e4fac..bce3827a2 100755 --- a/sharing/v2/model.go +++ b/sharing/v2/model.go @@ -420,11 +420,11 @@ func (f *PartitionValueOp) Type() string { type PermissionsChange struct { // The set of privileges to add. - Add []SharingPrivilege `json:"add,omitempty"` + Add []Privilege `json:"add,omitempty"` // The principal whose privileges we are changing. Principal string `json:"principal,omitempty"` // The set of privileges to remove. 
- Remove []SharingPrivilege `json:"remove,omitempty"` + Remove []Privilege `json:"remove,omitempty"` ForceSendFields []string `json:"-"` } @@ -439,7 +439,7 @@ func (s PermissionsChange) MarshalJSON() ([]byte, error) { type PermissionsList struct { // The privileges assigned to each principal - PrivilegeAssignments []SharingPrivilegeAssignment `json:"privilege_assignments,omitempty"` + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` } type Privilege string @@ -1063,136 +1063,6 @@ func (f *SharedDataObjectUpdateAction) Type() string { return "SharedDataObjectUpdateAction" } -type SharingPrivilege string - -const SharingPrivilegeAccess SharingPrivilege = `ACCESS` - -const SharingPrivilegeAllPrivileges SharingPrivilege = `ALL_PRIVILEGES` - -const SharingPrivilegeApplyTag SharingPrivilege = `APPLY_TAG` - -const SharingPrivilegeCreate SharingPrivilege = `CREATE` - -const SharingPrivilegeCreateCatalog SharingPrivilege = `CREATE_CATALOG` - -const SharingPrivilegeCreateConnection SharingPrivilege = `CREATE_CONNECTION` - -const SharingPrivilegeCreateExternalLocation SharingPrivilege = `CREATE_EXTERNAL_LOCATION` - -const SharingPrivilegeCreateExternalTable SharingPrivilege = `CREATE_EXTERNAL_TABLE` - -const SharingPrivilegeCreateExternalVolume SharingPrivilege = `CREATE_EXTERNAL_VOLUME` - -const SharingPrivilegeCreateForeignCatalog SharingPrivilege = `CREATE_FOREIGN_CATALOG` - -const SharingPrivilegeCreateForeignSecurable SharingPrivilege = `CREATE_FOREIGN_SECURABLE` - -const SharingPrivilegeCreateFunction SharingPrivilege = `CREATE_FUNCTION` - -const SharingPrivilegeCreateManagedStorage SharingPrivilege = `CREATE_MANAGED_STORAGE` - -const SharingPrivilegeCreateMaterializedView SharingPrivilege = `CREATE_MATERIALIZED_VIEW` - -const SharingPrivilegeCreateModel SharingPrivilege = `CREATE_MODEL` - -const SharingPrivilegeCreateProvider SharingPrivilege = `CREATE_PROVIDER` - -const SharingPrivilegeCreateRecipient SharingPrivilege = `CREATE_RECIPIENT` - -const SharingPrivilegeCreateSchema SharingPrivilege = `CREATE_SCHEMA` - -const SharingPrivilegeCreateServiceCredential SharingPrivilege = `CREATE_SERVICE_CREDENTIAL` - -const SharingPrivilegeCreateShare SharingPrivilege = `CREATE_SHARE` - -const SharingPrivilegeCreateStorageCredential SharingPrivilege = `CREATE_STORAGE_CREDENTIAL` - -const SharingPrivilegeCreateTable SharingPrivilege = `CREATE_TABLE` - -const SharingPrivilegeCreateView SharingPrivilege = `CREATE_VIEW` - -const SharingPrivilegeCreateVolume SharingPrivilege = `CREATE_VOLUME` - -const SharingPrivilegeExecute SharingPrivilege = `EXECUTE` - -const SharingPrivilegeManage SharingPrivilege = `MANAGE` - -const SharingPrivilegeManageAllowlist SharingPrivilege = `MANAGE_ALLOWLIST` - -const SharingPrivilegeModify SharingPrivilege = `MODIFY` - -const SharingPrivilegeReadFiles SharingPrivilege = `READ_FILES` - -const SharingPrivilegeReadPrivateFiles SharingPrivilege = `READ_PRIVATE_FILES` - -const SharingPrivilegeReadVolume SharingPrivilege = `READ_VOLUME` - -const SharingPrivilegeRefresh SharingPrivilege = `REFRESH` - -const SharingPrivilegeSelect SharingPrivilege = `SELECT` - -const SharingPrivilegeSetSharePermission SharingPrivilege = `SET_SHARE_PERMISSION` - -const SharingPrivilegeUsage SharingPrivilege = `USAGE` - -const SharingPrivilegeUseCatalog SharingPrivilege = `USE_CATALOG` - -const SharingPrivilegeUseConnection SharingPrivilege = `USE_CONNECTION` - -const SharingPrivilegeUseMarketplaceAssets SharingPrivilege = `USE_MARKETPLACE_ASSETS` - -const 
SharingPrivilegeUseProvider SharingPrivilege = `USE_PROVIDER` - -const SharingPrivilegeUseRecipient SharingPrivilege = `USE_RECIPIENT` - -const SharingPrivilegeUseSchema SharingPrivilege = `USE_SCHEMA` - -const SharingPrivilegeUseShare SharingPrivilege = `USE_SHARE` - -const SharingPrivilegeWriteFiles SharingPrivilege = `WRITE_FILES` - -const SharingPrivilegeWritePrivateFiles SharingPrivilege = `WRITE_PRIVATE_FILES` - -const SharingPrivilegeWriteVolume SharingPrivilege = `WRITE_VOLUME` - -// String representation for [fmt.Print] -func (f *SharingPrivilege) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *SharingPrivilege) Set(v string) error { - switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: - *f = SharingPrivilege(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) - } -} - -// Type always returns SharingPrivilege to satisfy [pflag.Value] interface -func (f *SharingPrivilege) Type() string { - return "SharingPrivilege" -} - -type SharingPrivilegeAssignment struct { - // The principal (user email address or group name). - Principal string `json:"principal,omitempty"` - // The privileges assigned to the principal. - Privileges []SharingPrivilege `json:"privileges,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *SharingPrivilegeAssignment) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s SharingPrivilegeAssignment) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type UpdatePermissionsResponse struct { } diff --git a/sharing/v2preview/api.go b/sharing/v2preview/api.go index 751bb01c1..7e472c823 100755 --- a/sharing/v2preview/api.go +++ b/sharing/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
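With `SharingPrivilege` removed, permission changes in `sharing/v2` now take the single `Privilege` enum everywhere. A hedged sketch of what a change looks like after the rename; the constant names are assumed to follow the usual `Privilege<Name>` pattern of this package's other enums:

```go
package main

import (
	"fmt"

	sharing "github.com/databricks/databricks-sdk-go/sharing/v2" // assumed import path
)

func main() {
	// Both Add and Remove now take the shared Privilege enum.
	change := sharing.PermissionsChange{
		Principal: "data-consumers", // hypothetical group name
		Add:       []sharing.Privilege{sharing.PrivilegeSelect}, // assumed constant name
		Remove:    []sharing.Privilege{sharing.PrivilegeModify}, // assumed constant name
	}
	fmt.Printf("%+v\n", change)
}
```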
-// These APIs allow you to manage Providers Preview, Recipient Activation Preview, Recipients Preview, Shares Preview, etc. +// These APIs allow you to manage Providers, Recipient Activation, Recipients, Shares, etc. package sharingpreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type ProvidersPreviewInterface interface { +type ProvidersInterface interface { // Create an auth provider. // @@ -66,7 +66,7 @@ type ProvidersPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) - // ProviderInfoNameToMetastoreIdMap calls [ProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. + // ProviderInfoNameToMetastoreIdMap calls [ProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // @@ -109,9 +109,9 @@ type ProvidersPreviewInterface interface { Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) } -func NewProvidersPreview(client *client.DatabricksClient) *ProvidersPreviewAPI { - return &ProvidersPreviewAPI{ - providersPreviewImpl: providersPreviewImpl{ +func NewProviders(client *client.DatabricksClient) *ProvidersAPI { + return &ProvidersAPI{ + providersImpl: providersImpl{ client: client, }, } @@ -120,16 +120,16 @@ func NewProvidersPreview(client *client.DatabricksClient) *ProvidersPreviewAPI { // A data provider is an object representing the organization in the real world // who shares the data. A provider contains shares which further contain the // shared data. -type ProvidersPreviewAPI struct { - providersPreviewImpl +type ProvidersAPI struct { + providersImpl } // Delete a provider. // // Deletes an authentication provider, if the caller is a metastore admin or is // the owner of the provider. -func (a *ProvidersPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.providersPreviewImpl.Delete(ctx, DeleteProviderRequest{ +func (a *ProvidersAPI) DeleteByName(ctx context.Context, name string) error { + return a.providersImpl.Delete(ctx, DeleteProviderRequest{ Name: name, }) } @@ -139,20 +139,20 @@ func (a *ProvidersPreviewAPI) DeleteByName(ctx context.Context, name string) err // Gets a specific authentication provider. The caller must supply the name of // the provider, and must either be a metastore admin or the owner of the // provider. -func (a *ProvidersPreviewAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { - return a.providersPreviewImpl.Get(ctx, GetProviderRequest{ +func (a *ProvidersAPI) GetByName(ctx context.Context, name string) (*ProviderInfo, error) { + return a.providersImpl.Get(ctx, GetProviderRequest{ Name: name, }) } -// ProviderInfoNameToMetastoreIdMap calls [ProvidersPreviewAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. +// ProviderInfoNameToMetastoreIdMap calls [ProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. // // Note: All [ProviderInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
-func (a *ProvidersPreviewAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { +func (a *ProvidersAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -175,13 +175,13 @@ func (a *ProvidersPreviewAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Conte // Gets an array of a specified provider's shares within the metastore where: // // * the caller is a metastore admin, or * the caller is the owner. -func (a *ProvidersPreviewAPI) ListSharesByName(ctx context.Context, name string) (*ListProviderSharesResponse, error) { - return a.providersPreviewImpl.internalListShares(ctx, ListSharesRequest{ +func (a *ProvidersAPI) ListSharesByName(ctx context.Context, name string) (*ListProviderSharesResponse, error) { + return a.providersImpl.internalListShares(ctx, ListSharesRequest{ Name: name, }) } -type RecipientActivationPreviewInterface interface { +type RecipientActivationInterface interface { // Get a share activation URL. // @@ -206,9 +206,9 @@ type RecipientActivationPreviewInterface interface { RetrieveTokenByActivationUrl(ctx context.Context, activationUrl string) (*RetrieveTokenResponse, error) } -func NewRecipientActivationPreview(client *client.DatabricksClient) *RecipientActivationPreviewAPI { - return &RecipientActivationPreviewAPI{ - recipientActivationPreviewImpl: recipientActivationPreviewImpl{ +func NewRecipientActivation(client *client.DatabricksClient) *RecipientActivationAPI { + return &RecipientActivationAPI{ + recipientActivationImpl: recipientActivationImpl{ client: client, }, } @@ -224,15 +224,15 @@ func NewRecipientActivationPreview(client *client.DatabricksClient) *RecipientAc // Note that you can download the credential file only once. Recipients should // treat the downloaded credential as a secret and must not share it outside of // their organization. -type RecipientActivationPreviewAPI struct { - recipientActivationPreviewImpl +type RecipientActivationAPI struct { + recipientActivationImpl } // Get a share activation URL. // // Gets an activation URL for a share. -func (a *RecipientActivationPreviewAPI) GetActivationUrlInfoByActivationUrl(ctx context.Context, activationUrl string) error { - return a.recipientActivationPreviewImpl.GetActivationUrlInfo(ctx, GetActivationUrlInfoRequest{ +func (a *RecipientActivationAPI) GetActivationUrlInfoByActivationUrl(ctx context.Context, activationUrl string) error { + return a.recipientActivationImpl.GetActivationUrlInfo(ctx, GetActivationUrlInfoRequest{ ActivationUrl: activationUrl, }) } @@ -241,13 +241,13 @@ func (a *RecipientActivationPreviewAPI) GetActivationUrlInfoByActivationUrl(ctx // // Retrieve access token with an activation url. This is a public API without // any authentication. -func (a *RecipientActivationPreviewAPI) RetrieveTokenByActivationUrl(ctx context.Context, activationUrl string) (*RetrieveTokenResponse, error) { - return a.recipientActivationPreviewImpl.RetrieveToken(ctx, RetrieveTokenRequest{ +func (a *RecipientActivationAPI) RetrieveTokenByActivationUrl(ctx context.Context, activationUrl string) (*RetrieveTokenResponse, error) { + return a.recipientActivationImpl.RetrieveToken(ctx, RetrieveTokenRequest{ ActivationUrl: activationUrl, }) } -type RecipientsPreviewInterface interface { +type RecipientsInterface interface { // Create a share recipient. 
// @@ -329,9 +329,9 @@ type RecipientsPreviewInterface interface { Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) } -func NewRecipientsPreview(client *client.DatabricksClient) *RecipientsPreviewAPI { - return &RecipientsPreviewAPI{ - recipientsPreviewImpl: recipientsPreviewImpl{ +func NewRecipients(client *client.DatabricksClient) *RecipientsAPI { + return &RecipientsAPI{ + recipientsImpl: recipientsImpl{ client: client, }, } @@ -354,16 +354,16 @@ func NewRecipientsPreview(client *client.DatabricksClient) *RecipientsPreviewAPI // activation link to download the credential file, and then uses the credential // file to establish a secure connection to receive the shared data. This // sharing mode is called **open sharing**. -type RecipientsPreviewAPI struct { - recipientsPreviewImpl +type RecipientsAPI struct { + recipientsImpl } // Delete a share recipient. // // Deletes the specified recipient from the metastore. The caller must be the // owner of the recipient. -func (a *RecipientsPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.recipientsPreviewImpl.Delete(ctx, DeleteRecipientRequest{ +func (a *RecipientsAPI) DeleteByName(ctx context.Context, name string) error { + return a.recipientsImpl.Delete(ctx, DeleteRecipientRequest{ Name: name, }) } @@ -373,8 +373,8 @@ func (a *RecipientsPreviewAPI) DeleteByName(ctx context.Context, name string) er // Gets a share recipient from the metastore if: // // * the caller is the owner of the share recipient, or: * is a metastore admin -func (a *RecipientsPreviewAPI) GetByName(ctx context.Context, name string) (*RecipientInfo, error) { - return a.recipientsPreviewImpl.Get(ctx, GetRecipientRequest{ +func (a *RecipientsAPI) GetByName(ctx context.Context, name string) (*RecipientInfo, error) { + return a.recipientsImpl.Get(ctx, GetRecipientRequest{ Name: name, }) } @@ -383,13 +383,13 @@ func (a *RecipientsPreviewAPI) GetByName(ctx context.Context, name string) (*Rec // // Gets the share permissions for the specified Recipient. The caller must be a // metastore admin or the owner of the Recipient. -func (a *RecipientsPreviewAPI) SharePermissionsByName(ctx context.Context, name string) (*GetRecipientSharePermissionsResponse, error) { - return a.recipientsPreviewImpl.SharePermissions(ctx, SharePermissionsRequest{ +func (a *RecipientsAPI) SharePermissionsByName(ctx context.Context, name string) (*GetRecipientSharePermissionsResponse, error) { + return a.recipientsImpl.SharePermissions(ctx, SharePermissionsRequest{ Name: name, }) } -type SharesPreviewInterface interface { +type SharesInterface interface { // Create a share. // @@ -484,9 +484,9 @@ type SharesPreviewInterface interface { UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error } -func NewSharesPreview(client *client.DatabricksClient) *SharesPreviewAPI { - return &SharesPreviewAPI{ - sharesPreviewImpl: sharesPreviewImpl{ +func NewShares(client *client.DatabricksClient) *SharesAPI { + return &SharesAPI{ + sharesImpl: sharesImpl{ client: client, }, } @@ -497,16 +497,16 @@ func NewSharesPreview(client *client.DatabricksClient) *SharesPreviewAPI { // within the metastore using :method:shares/update. You can register data // assets under their original name, qualified by their original schema, or // provide alternate exposed names. -type SharesPreviewAPI struct { - sharesPreviewImpl +type SharesAPI struct { + sharesImpl } // Delete a share. // // Deletes a data object share from the metastore. 
The caller must be an owner // of the share. -func (a *SharesPreviewAPI) DeleteByName(ctx context.Context, name string) error { - return a.sharesPreviewImpl.Delete(ctx, DeleteShareRequest{ +func (a *SharesAPI) DeleteByName(ctx context.Context, name string) error { + return a.sharesImpl.Delete(ctx, DeleteShareRequest{ Name: name, }) } @@ -515,8 +515,8 @@ func (a *SharesPreviewAPI) DeleteByName(ctx context.Context, name string) error // // Gets a data object share from the metastore. The caller must be a metastore // admin or the owner of the share. -func (a *SharesPreviewAPI) GetByName(ctx context.Context, name string) (*ShareInfo, error) { - return a.sharesPreviewImpl.Get(ctx, GetShareRequest{ +func (a *SharesAPI) GetByName(ctx context.Context, name string) (*ShareInfo, error) { + return a.sharesImpl.Get(ctx, GetShareRequest{ Name: name, }) } @@ -525,8 +525,8 @@ func (a *SharesPreviewAPI) GetByName(ctx context.Context, name string) (*ShareIn // // Gets the permissions for a data share from the metastore. The caller must be // a metastore admin or the owner of the share. -func (a *SharesPreviewAPI) SharePermissionsByName(ctx context.Context, name string) (*PermissionsList, error) { - return a.sharesPreviewImpl.SharePermissions(ctx, SharePermissionsRequest{ +func (a *SharesAPI) SharePermissionsByName(ctx context.Context, name string) (*PermissionsList, error) { + return a.sharesImpl.SharePermissions(ctx, SharePermissionsRequest{ Name: name, }) } diff --git a/sharing/v2preview/client.go b/sharing/v2preview/client.go index 6fd9bc330..db3bbc32d 100755 --- a/sharing/v2preview/client.go +++ b/sharing/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type ProvidersPreviewClient struct { - ProvidersPreviewInterface +type ProvidersClient struct { + ProvidersInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewProvidersPreviewClient(cfg *config.Config) (*ProvidersPreviewClient, error) { +func NewProvidersClient(cfg *config.Config) (*ProvidersClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewProvidersPreviewClient(cfg *config.Config) (*ProvidersPreviewClient, err return nil, err } - return &ProvidersPreviewClient{ - Config: cfg, - apiClient: apiClient, - ProvidersPreviewInterface: NewProvidersPreview(databricksClient), + return &ProvidersClient{ + Config: cfg, + apiClient: apiClient, + ProvidersInterface: NewProviders(databricksClient), }, nil } -type RecipientActivationPreviewClient struct { - RecipientActivationPreviewInterface +type RecipientActivationClient struct { + RecipientActivationInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewRecipientActivationPreviewClient(cfg *config.Config) (*RecipientActivationPreviewClient, error) { +func NewRecipientActivationClient(cfg *config.Config) (*RecipientActivationClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewRecipientActivationPreviewClient(cfg *config.Config) (*RecipientActivati return nil, err } - return &RecipientActivationPreviewClient{ - Config: cfg, - apiClient: apiClient, - RecipientActivationPreviewInterface: NewRecipientActivationPreview(databricksClient), + return &RecipientActivationClient{ + Config: cfg, + apiClient: apiClient, + RecipientActivationInterface: NewRecipientActivation(databricksClient), }, nil } -type RecipientsPreviewClient struct { - RecipientsPreviewInterface +type RecipientsClient struct { + RecipientsInterface Config *config.Config 
apiClient *httpclient.ApiClient } -func NewRecipientsPreviewClient(cfg *config.Config) (*RecipientsPreviewClient, error) { +func NewRecipientsClient(cfg *config.Config) (*RecipientsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewRecipientsPreviewClient(cfg *config.Config) (*RecipientsPreviewClient, e return nil, err } - return &RecipientsPreviewClient{ - Config: cfg, - apiClient: apiClient, - RecipientsPreviewInterface: NewRecipientsPreview(databricksClient), + return &RecipientsClient{ + Config: cfg, + apiClient: apiClient, + RecipientsInterface: NewRecipients(databricksClient), }, nil } -type SharesPreviewClient struct { - SharesPreviewInterface +type SharesClient struct { + SharesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewSharesPreviewClient(cfg *config.Config) (*SharesPreviewClient, error) { +func NewSharesClient(cfg *config.Config) (*SharesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,9 +139,9 @@ func NewSharesPreviewClient(cfg *config.Config) (*SharesPreviewClient, error) { return nil, err } - return &SharesPreviewClient{ - Config: cfg, - apiClient: apiClient, - SharesPreviewInterface: NewSharesPreview(databricksClient), + return &SharesClient{ + Config: cfg, + apiClient: apiClient, + SharesInterface: NewShares(databricksClient), }, nil } diff --git a/sharing/v2preview/impl.go b/sharing/v2preview/impl.go index eecd64f1e..84d648c1a 100755 --- a/sharing/v2preview/impl.go +++ b/sharing/v2preview/impl.go @@ -13,12 +13,12 @@ import ( "golang.org/x/exp/slices" ) -// unexported type that holds implementations of just ProvidersPreview API methods -type providersPreviewImpl struct { +// unexported type that holds implementations of just Providers API methods +type providersImpl struct { client *client.DatabricksClient } -func (a *providersPreviewImpl) Create(ctx context.Context, request CreateProvider) (*ProviderInfo, error) { +func (a *providersImpl) Create(ctx context.Context, request CreateProvider) (*ProviderInfo, error) { var providerInfo ProviderInfo path := "/api/2.1preview/unity-catalog/providers" queryParams := make(map[string]any) @@ -29,7 +29,7 @@ func (a *providersPreviewImpl) Create(ctx context.Context, request CreateProvide return &providerInfo, err } -func (a *providersPreviewImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { +func (a *providersImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) queryParams := make(map[string]any) @@ -39,7 +39,7 @@ func (a *providersPreviewImpl) Delete(ctx context.Context, request DeleteProvide return err } -func (a *providersPreviewImpl) Get(ctx context.Context, request GetProviderRequest) (*ProviderInfo, error) { +func (a *providersImpl) Get(ctx context.Context, request GetProviderRequest) (*ProviderInfo, error) { var providerInfo ProviderInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) queryParams := make(map[string]any) @@ -55,7 +55,7 @@ func (a *providersPreviewImpl) Get(ctx context.Context, request GetProviderReque // be a metastore admin or the owner of the providers. Providers not owned by // the caller are not included in the response. There is no guarantee of a // specific ordering of the elements in the array. 
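The renamed client constructors above all share one shape: accept an optional `*config.Config`, build the HTTP and Databricks clients, and embed the service interface. A minimal wiring sketch under those constructors (import paths assumed, share name hypothetical):

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go/databricks/config" // assumed import path
	sharingpreview "github.com/databricks/databricks-sdk-go/sharing/v2preview"
)

func main() {
	ctx := context.Background()
	// An empty (or nil) config makes the constructor resolve
	// authentication from the environment, per the nil check above.
	c, err := sharingpreview.NewSharesClient(&config.Config{})
	if err != nil {
		log.Fatal(err)
	}
	share, err := c.GetByName(ctx, "my_share") // hypothetical share name
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("share owner: %s", share.Owner)
}
```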
-func (a *providersPreviewImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { +func (a *providersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -85,11 +85,11 @@ func (a *providersPreviewImpl) List(ctx context.Context, request ListProvidersRe // be a metastore admin or the owner of the providers. Providers not owned by // the caller are not included in the response. There is no guarantee of a // specific ordering of the elements in the array. -func (a *providersPreviewImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { +func (a *providersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ProviderInfo](ctx, iterator) } -func (a *providersPreviewImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +func (a *providersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1preview/unity-catalog/providers" queryParams := make(map[string]any) @@ -104,7 +104,7 @@ func (a *providersPreviewImpl) internalList(ctx context.Context, request ListPro // Gets an array of a specified provider's shares within the metastore where: // // * the caller is a metastore admin, or * the caller is the owner. -func (a *providersPreviewImpl) ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] { +func (a *providersImpl) ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] { getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListProviderSharesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -133,11 +133,11 @@ func (a *providersPreviewImpl) ListShares(ctx context.Context, request ListShare // Gets an array of a specified provider's shares within the metastore where: // // * the caller is a metastore admin, or * the caller is the owner. 
-func (a *providersPreviewImpl) ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) { +func (a *providersImpl) ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) { iterator := a.ListShares(ctx, request) return listing.ToSlice[ProviderShare](ctx, iterator) } -func (a *providersPreviewImpl) internalListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { +func (a *providersImpl) internalListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { var listProviderSharesResponse ListProviderSharesResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v/shares", request.Name) queryParams := make(map[string]any) @@ -147,7 +147,7 @@ func (a *providersPreviewImpl) internalListShares(ctx context.Context, request L return &listProviderSharesResponse, err } -func (a *providersPreviewImpl) Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) { +func (a *providersImpl) Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) { var providerInfo ProviderInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/providers/%v", request.Name) queryParams := make(map[string]any) @@ -158,12 +158,12 @@ func (a *providersPreviewImpl) Update(ctx context.Context, request UpdateProvide return &providerInfo, err } -// unexported type that holds implementations of just RecipientActivationPreview API methods -type recipientActivationPreviewImpl struct { +// unexported type that holds implementations of just RecipientActivation API methods +type recipientActivationImpl struct { client *client.DatabricksClient } -func (a *recipientActivationPreviewImpl) GetActivationUrlInfo(ctx context.Context, request GetActivationUrlInfoRequest) error { +func (a *recipientActivationImpl) GetActivationUrlInfo(ctx context.Context, request GetActivationUrlInfoRequest) error { var getActivationUrlInfoResponse GetActivationUrlInfoResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/public/data_sharing_activation_info/%v", request.ActivationUrl) queryParams := make(map[string]any) @@ -173,7 +173,7 @@ func (a *recipientActivationPreviewImpl) GetActivationUrlInfo(ctx context.Contex return err } -func (a *recipientActivationPreviewImpl) RetrieveToken(ctx context.Context, request RetrieveTokenRequest) (*RetrieveTokenResponse, error) { +func (a *recipientActivationImpl) RetrieveToken(ctx context.Context, request RetrieveTokenRequest) (*RetrieveTokenResponse, error) { var retrieveTokenResponse RetrieveTokenResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/public/data_sharing_activation/%v", request.ActivationUrl) queryParams := make(map[string]any) @@ -183,12 +183,12 @@ func (a *recipientActivationPreviewImpl) RetrieveToken(ctx context.Context, requ return &retrieveTokenResponse, err } -// unexported type that holds implementations of just RecipientsPreview API methods -type recipientsPreviewImpl struct { +// unexported type that holds implementations of just Recipients API methods +type recipientsImpl struct { client *client.DatabricksClient } -func (a *recipientsPreviewImpl) Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) { +func (a *recipientsImpl) Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := "/api/2.1preview/unity-catalog/recipients" queryParams := make(map[string]any) @@ -199,7 +199,7 @@ func (a *recipientsPreviewImpl) 
Create(ctx context.Context, request CreateRecipi return &recipientInfo, err } -func (a *recipientsPreviewImpl) Delete(ctx context.Context, request DeleteRecipientRequest) error { +func (a *recipientsImpl) Delete(ctx context.Context, request DeleteRecipientRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) queryParams := make(map[string]any) @@ -209,7 +209,7 @@ func (a *recipientsPreviewImpl) Delete(ctx context.Context, request DeleteRecipi return err } -func (a *recipientsPreviewImpl) Get(ctx context.Context, request GetRecipientRequest) (*RecipientInfo, error) { +func (a *recipientsImpl) Get(ctx context.Context, request GetRecipientRequest) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) queryParams := make(map[string]any) @@ -225,7 +225,7 @@ func (a *recipientsPreviewImpl) Get(ctx context.Context, request GetRecipientReq // // * the caller is a metastore admin, or * the caller is the owner. There is no // guarantee of a specific ordering of the elements in the array. -func (a *recipientsPreviewImpl) List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] { +func (a *recipientsImpl) List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] { getNextPage := func(ctx context.Context, req ListRecipientsRequest) (*ListRecipientsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -255,11 +255,11 @@ func (a *recipientsPreviewImpl) List(ctx context.Context, request ListRecipients // // * the caller is a metastore admin, or * the caller is the owner. There is no // guarantee of a specific ordering of the elements in the array. 
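(Illustration, not part of the patch: rotating a recipient's token through the renamed recipientsImpl surface. The Name field is grounded in the path template above; the ExistingTokenExpireInSeconds field is an assumption carried over from the non-preview sharing package.)

	func rotateRecipientNow(ctx context.Context, r sharingpreview.RecipientsInterface, name string) (*sharingpreview.RecipientInfo, error) {
		// Expire the current activation token immediately and mint a
		// replacement (field name assumed, see note above).
		return r.RotateToken(ctx, sharingpreview.RotateRecipientToken{
			Name:                         name,
			ExistingTokenExpireInSeconds: 0,
		})
	}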
-func (a *recipientsPreviewImpl) ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) { +func (a *recipientsImpl) ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[RecipientInfo](ctx, iterator) } -func (a *recipientsPreviewImpl) internalList(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { +func (a *recipientsImpl) internalList(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { var listRecipientsResponse ListRecipientsResponse path := "/api/2.1preview/unity-catalog/recipients" queryParams := make(map[string]any) @@ -269,7 +269,7 @@ func (a *recipientsPreviewImpl) internalList(ctx context.Context, request ListRe return &listRecipientsResponse, err } -func (a *recipientsPreviewImpl) RotateToken(ctx context.Context, request RotateRecipientToken) (*RecipientInfo, error) { +func (a *recipientsImpl) RotateToken(ctx context.Context, request RotateRecipientToken) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v/rotate-token", request.Name) queryParams := make(map[string]any) @@ -280,7 +280,7 @@ func (a *recipientsPreviewImpl) RotateToken(ctx context.Context, request RotateR return &recipientInfo, err } -func (a *recipientsPreviewImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetRecipientSharePermissionsResponse, error) { +func (a *recipientsImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetRecipientSharePermissionsResponse, error) { var getRecipientSharePermissionsResponse GetRecipientSharePermissionsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v/share-permissions", request.Name) queryParams := make(map[string]any) @@ -290,7 +290,7 @@ func (a *recipientsPreviewImpl) SharePermissions(ctx context.Context, request Sh return &getRecipientSharePermissionsResponse, err } -func (a *recipientsPreviewImpl) Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) { +func (a *recipientsImpl) Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/recipients/%v", request.Name) queryParams := make(map[string]any) @@ -301,12 +301,12 @@ func (a *recipientsPreviewImpl) Update(ctx context.Context, request UpdateRecipi return &recipientInfo, err } -// unexported type that holds implementations of just SharesPreview API methods -type sharesPreviewImpl struct { +// unexported type that holds implementations of just Shares API methods +type sharesImpl struct { client *client.DatabricksClient } -func (a *sharesPreviewImpl) Create(ctx context.Context, request CreateShare) (*ShareInfo, error) { +func (a *sharesImpl) Create(ctx context.Context, request CreateShare) (*ShareInfo, error) { var shareInfo ShareInfo path := "/api/2.1preview/unity-catalog/shares" queryParams := make(map[string]any) @@ -317,7 +317,7 @@ func (a *sharesPreviewImpl) Create(ctx context.Context, request CreateShare) (*S return &shareInfo, err } -func (a *sharesPreviewImpl) Delete(ctx context.Context, request DeleteShareRequest) error { +func (a *sharesImpl) Delete(ctx context.Context, request DeleteShareRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) queryParams := make(map[string]any) @@ 
-327,7 +327,7 @@ func (a *sharesPreviewImpl) Delete(ctx context.Context, request DeleteShareReque return err } -func (a *sharesPreviewImpl) Get(ctx context.Context, request GetShareRequest) (*ShareInfo, error) { +func (a *sharesImpl) Get(ctx context.Context, request GetShareRequest) (*ShareInfo, error) { var shareInfo ShareInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) queryParams := make(map[string]any) @@ -342,7 +342,7 @@ func (a *sharesPreviewImpl) Get(ctx context.Context, request GetShareRequest) (* // Gets an array of data object shares from the metastore. The caller must be a // metastore admin or the owner of the share. There is no guarantee of a // specific ordering of the elements in the array. -func (a *sharesPreviewImpl) List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo] { +func (a *sharesImpl) List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo] { getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListSharesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -371,11 +371,11 @@ func (a *sharesPreviewImpl) List(ctx context.Context, request ListSharesRequest) // Gets an array of data object shares from the metastore. The caller must be a // metastore admin or the owner of the share. There is no guarantee of a // specific ordering of the elements in the array. -func (a *sharesPreviewImpl) ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error) { +func (a *sharesImpl) ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ShareInfo](ctx, iterator) } -func (a *sharesPreviewImpl) internalList(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { +func (a *sharesImpl) internalList(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { var listSharesResponse ListSharesResponse path := "/api/2.1preview/unity-catalog/shares" queryParams := make(map[string]any) @@ -385,7 +385,7 @@ func (a *sharesPreviewImpl) internalList(ctx context.Context, request ListShares return &listSharesResponse, err } -func (a *sharesPreviewImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*PermissionsList, error) { +func (a *sharesImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*PermissionsList, error) { var permissionsList PermissionsList path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v/permissions", request.Name) queryParams := make(map[string]any) @@ -395,7 +395,7 @@ func (a *sharesPreviewImpl) SharePermissions(ctx context.Context, request ShareP return &permissionsList, err } -func (a *sharesPreviewImpl) Update(ctx context.Context, request UpdateShare) (*ShareInfo, error) { +func (a *sharesImpl) Update(ctx context.Context, request UpdateShare) (*ShareInfo, error) { var shareInfo ShareInfo path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v", request.Name) queryParams := make(map[string]any) @@ -406,7 +406,7 @@ func (a *sharesPreviewImpl) Update(ctx context.Context, request UpdateShare) (*S return &shareInfo, err } -func (a *sharesPreviewImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error { +func (a *sharesImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error { var updatePermissionsResponse UpdatePermissionsResponse path := fmt.Sprintf("/api/2.1preview/unity-catalog/shares/%v/permissions", 
request.Name) queryParams := make(map[string]any) diff --git a/sharing/v2preview/model.go b/sharing/v2preview/model.go index 8007814c8..b5432fb2a 100755 --- a/sharing/v2preview/model.go +++ b/sharing/v2preview/model.go @@ -420,11 +420,11 @@ func (f *PartitionValueOp) Type() string { type PermissionsChange struct { // The set of privileges to add. - Add []SharingPrivilege `json:"add,omitempty"` + Add []Privilege `json:"add,omitempty"` // The principal whose privileges we are changing. Principal string `json:"principal,omitempty"` // The set of privileges to remove. - Remove []SharingPrivilege `json:"remove,omitempty"` + Remove []Privilege `json:"remove,omitempty"` ForceSendFields []string `json:"-"` } @@ -439,7 +439,7 @@ func (s PermissionsChange) MarshalJSON() ([]byte, error) { type PermissionsList struct { // The privileges assigned to each principal - PrivilegeAssignments []SharingPrivilegeAssignment `json:"privilege_assignments,omitempty"` + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` } type Privilege string @@ -1063,136 +1063,6 @@ func (f *SharedDataObjectUpdateAction) Type() string { return "SharedDataObjectUpdateAction" } -type SharingPrivilege string - -const SharingPrivilegeAccess SharingPrivilege = `ACCESS` - -const SharingPrivilegeAllPrivileges SharingPrivilege = `ALL_PRIVILEGES` - -const SharingPrivilegeApplyTag SharingPrivilege = `APPLY_TAG` - -const SharingPrivilegeCreate SharingPrivilege = `CREATE` - -const SharingPrivilegeCreateCatalog SharingPrivilege = `CREATE_CATALOG` - -const SharingPrivilegeCreateConnection SharingPrivilege = `CREATE_CONNECTION` - -const SharingPrivilegeCreateExternalLocation SharingPrivilege = `CREATE_EXTERNAL_LOCATION` - -const SharingPrivilegeCreateExternalTable SharingPrivilege = `CREATE_EXTERNAL_TABLE` - -const SharingPrivilegeCreateExternalVolume SharingPrivilege = `CREATE_EXTERNAL_VOLUME` - -const SharingPrivilegeCreateForeignCatalog SharingPrivilege = `CREATE_FOREIGN_CATALOG` - -const SharingPrivilegeCreateForeignSecurable SharingPrivilege = `CREATE_FOREIGN_SECURABLE` - -const SharingPrivilegeCreateFunction SharingPrivilege = `CREATE_FUNCTION` - -const SharingPrivilegeCreateManagedStorage SharingPrivilege = `CREATE_MANAGED_STORAGE` - -const SharingPrivilegeCreateMaterializedView SharingPrivilege = `CREATE_MATERIALIZED_VIEW` - -const SharingPrivilegeCreateModel SharingPrivilege = `CREATE_MODEL` - -const SharingPrivilegeCreateProvider SharingPrivilege = `CREATE_PROVIDER` - -const SharingPrivilegeCreateRecipient SharingPrivilege = `CREATE_RECIPIENT` - -const SharingPrivilegeCreateSchema SharingPrivilege = `CREATE_SCHEMA` - -const SharingPrivilegeCreateServiceCredential SharingPrivilege = `CREATE_SERVICE_CREDENTIAL` - -const SharingPrivilegeCreateShare SharingPrivilege = `CREATE_SHARE` - -const SharingPrivilegeCreateStorageCredential SharingPrivilege = `CREATE_STORAGE_CREDENTIAL` - -const SharingPrivilegeCreateTable SharingPrivilege = `CREATE_TABLE` - -const SharingPrivilegeCreateView SharingPrivilege = `CREATE_VIEW` - -const SharingPrivilegeCreateVolume SharingPrivilege = `CREATE_VOLUME` - -const SharingPrivilegeExecute SharingPrivilege = `EXECUTE` - -const SharingPrivilegeManage SharingPrivilege = `MANAGE` - -const SharingPrivilegeManageAllowlist SharingPrivilege = `MANAGE_ALLOWLIST` - -const SharingPrivilegeModify SharingPrivilege = `MODIFY` - -const SharingPrivilegeReadFiles SharingPrivilege = `READ_FILES` - -const SharingPrivilegeReadPrivateFiles SharingPrivilege = `READ_PRIVATE_FILES` - -const 
SharingPrivilegeReadVolume SharingPrivilege = `READ_VOLUME` - -const SharingPrivilegeRefresh SharingPrivilege = `REFRESH` - -const SharingPrivilegeSelect SharingPrivilege = `SELECT` - -const SharingPrivilegeSetSharePermission SharingPrivilege = `SET_SHARE_PERMISSION` - -const SharingPrivilegeUsage SharingPrivilege = `USAGE` - -const SharingPrivilegeUseCatalog SharingPrivilege = `USE_CATALOG` - -const SharingPrivilegeUseConnection SharingPrivilege = `USE_CONNECTION` - -const SharingPrivilegeUseMarketplaceAssets SharingPrivilege = `USE_MARKETPLACE_ASSETS` - -const SharingPrivilegeUseProvider SharingPrivilege = `USE_PROVIDER` - -const SharingPrivilegeUseRecipient SharingPrivilege = `USE_RECIPIENT` - -const SharingPrivilegeUseSchema SharingPrivilege = `USE_SCHEMA` - -const SharingPrivilegeUseShare SharingPrivilege = `USE_SHARE` - -const SharingPrivilegeWriteFiles SharingPrivilege = `WRITE_FILES` - -const SharingPrivilegeWritePrivateFiles SharingPrivilege = `WRITE_PRIVATE_FILES` - -const SharingPrivilegeWriteVolume SharingPrivilege = `WRITE_VOLUME` - -// String representation for [fmt.Print] -func (f *SharingPrivilege) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *SharingPrivilege) Set(v string) error { - switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: - *f = SharingPrivilege(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) - } -} - -// Type always returns SharingPrivilege to satisfy [pflag.Value] interface -func (f *SharingPrivilege) Type() string { - return "SharingPrivilege" -} - -type SharingPrivilegeAssignment struct { - // The principal (user email address or group name). - Principal string `json:"principal,omitempty"` - // The privileges assigned to the principal. 
- Privileges []SharingPrivilege `json:"privileges,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *SharingPrivilegeAssignment) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s SharingPrivilegeAssignment) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type UpdatePermissionsResponse struct { } diff --git a/sql/v2preview/api.go b/sql/v2preview/api.go index 94ebafd2e..c28fb6bdb 100755 --- a/sql/v2preview/api.go +++ b/sql/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Alerts Legacy Preview, Alerts Preview, Dashboard Widgets Preview, Dashboards Preview, Data Sources Preview, Dbsql Permissions Preview, Queries Legacy Preview, Queries Preview, Query History Preview, Query Visualizations Legacy Preview, Query Visualizations Preview, Redash Config Preview, Statement Execution Preview, Warehouses Preview, etc. +// These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. package sqlpreview import ( @@ -12,106 +12,82 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type AlertsLegacyPreviewInterface interface { +type AlertsInterface interface { // Create an alert. // - // Creates an alert. An alert is a Databricks SQL object that periodically runs - // a query, evaluates a condition of its result, and notifies users or - // notification destinations if the condition was met. - // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/create instead. [Learn more] - // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) + // Creates an alert. + Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) // Delete an alert. // - // Deletes an alert. Deleted alerts are no longer accessible and cannot be - // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to - // the trash. - // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/delete instead. [Learn more] - // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error + // Moves an alert to the trash. Trashed alerts immediately disappear from + // searches and list views, and can no longer trigger. You can restore a trashed + // alert through the UI. A trashed alert is permanently deleted after 30 days. + Delete(ctx context.Context, request TrashAlertRequest) error // Delete an alert. // - // Deletes an alert. Deleted alerts are no longer accessible and cannot be - // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to - // the trash. - // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/delete instead. [Learn more] - // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - DeleteByAlertId(ctx context.Context, alertId string) error + // Moves an alert to the trash. Trashed alerts immediately disappear from + // searches and list views, and can no longer trigger. You can restore a trashed + // alert through the UI. 
A trashed alert is permanently deleted after 30 days. + DeleteById(ctx context.Context, id string) error // Get an alert. // // Gets an alert. - // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/get instead. [Learn more] - // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) + Get(ctx context.Context, request GetAlertRequest) (*Alert, error) // Get an alert. // // Gets an alert. + GetById(ctx context.Context, id string) (*Alert, error) + + // List alerts. // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/get instead. [Learn more] + // Gets a list of alerts accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] - // Get alerts. - // - // Gets a list of alerts. + // List alerts. // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/list instead. [Learn more] + // Gets a list of alerts accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - List(ctx context.Context) ([]LegacyAlert, error) + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) - // LegacyAlertNameToIdMap calls [AlertsLegacyPreviewAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. + // ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. // - // Returns an error if there's more than one [LegacyAlert] with the same .Name. + // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. // - // Note: All [LegacyAlert] instances are loaded into memory before creating a map. + // Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. - LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) + ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) - // GetByName calls [AlertsLegacyPreviewAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. + // GetByDisplayName calls [AlertsAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. // - // Returns an error if there's more than one [LegacyAlert] with the same .Name. + // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. // - // Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. 
+ // Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. - GetByName(ctx context.Context, name string) (*LegacyAlert, error) + GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) // Update an alert. // // Updates an alert. - // - // **Note**: A new version of the Databricks SQL API is now available. Please - // use :method:alerts/update instead. [Learn more] - // - // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html - Update(ctx context.Context, request EditAlert) error + Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) } -func NewAlertsLegacyPreview(client *client.DatabricksClient) *AlertsLegacyPreviewAPI { - return &AlertsLegacyPreviewAPI{ - alertsLegacyPreviewImpl: alertsLegacyPreviewImpl{ +func NewAlerts(client *client.DatabricksClient) *AlertsAPI { + return &AlertsAPI{ + alertsImpl: alertsImpl{ client: client, }, } @@ -122,174 +98,183 @@ func NewAlertsLegacyPreview(client *client.DatabricksClient) *AlertsLegacyPrevie // of its result, and notifies one or more users and/or notification // destinations if the condition was met. Alerts can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// see the latest version. [Learn more] -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -type AlertsLegacyPreviewAPI struct { - alertsLegacyPreviewImpl +type AlertsAPI struct { + alertsImpl } // Delete an alert. // -// Deletes an alert. Deleted alerts are no longer accessible and cannot be -// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to -// the trash. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:alerts/delete instead. [Learn more] -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *AlertsLegacyPreviewAPI) DeleteByAlertId(ctx context.Context, alertId string) error { - return a.alertsLegacyPreviewImpl.Delete(ctx, DeleteAlertsLegacyRequest{ - AlertId: alertId, +// Moves an alert to the trash. Trashed alerts immediately disappear from +// searches and list views, and can no longer trigger. You can restore a trashed +// alert through the UI. A trashed alert is permanently deleted after 30 days. +func (a *AlertsAPI) DeleteById(ctx context.Context, id string) error { + return a.alertsImpl.Delete(ctx, TrashAlertRequest{ + Id: id, }) } // Get an alert. // // Gets an alert. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:alerts/get instead. [Learn more] -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *AlertsLegacyPreviewAPI) GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) { - return a.alertsLegacyPreviewImpl.Get(ctx, GetAlertsLegacyRequest{ - AlertId: alertId, +func (a *AlertsAPI) GetById(ctx context.Context, id string) (*Alert, error) { + return a.alertsImpl.Get(ctx, GetAlertRequest{ + Id: id, }) } -// LegacyAlertNameToIdMap calls [AlertsLegacyPreviewAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. +// ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. 
// -// Returns an error if there's more than one [LegacyAlert] with the same .Name. +// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. // -// Note: All [LegacyAlert] instances are loaded into memory before creating a map. +// Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *AlertsLegacyPreviewAPI) LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) { +func (a *AlertsAPI) ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} - result, err := a.List(ctx) + result, err := a.ListAll(ctx, request) if err != nil { return nil, err } for _, v := range result { - key := v.Name + key := v.DisplayName _, duplicate := mapping[key] if duplicate { - return nil, fmt.Errorf("duplicate .Name: %s", key) + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) } mapping[key] = v.Id } return mapping, nil } -// GetByName calls [AlertsLegacyPreviewAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. +// GetByDisplayName calls [AlertsAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. // -// Returns an error if there's more than one [LegacyAlert] with the same .Name. +// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. // -// Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. +// Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *AlertsLegacyPreviewAPI) GetByName(ctx context.Context, name string) (*LegacyAlert, error) { +func (a *AlertsAPI) GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") - result, err := a.List(ctx) + result, err := a.ListAll(ctx, ListAlertsRequest{}) if err != nil { return nil, err } - tmp := map[string][]LegacyAlert{} + tmp := map[string][]ListAlertsResponseAlert{} for _, v := range result { - key := v.Name + key := v.DisplayName tmp[key] = append(tmp[key], v) } alternatives, ok := tmp[name] if !ok || len(alternatives) == 0 { - return nil, fmt.Errorf("LegacyAlert named '%s' does not exist", name) + return nil, fmt.Errorf("ListAlertsResponseAlert named '%s' does not exist", name) } if len(alternatives) > 1 { - return nil, fmt.Errorf("there are %d instances of LegacyAlert named '%s'", len(alternatives), name) + return nil, fmt.Errorf("there are %d instances of ListAlertsResponseAlert named '%s'", len(alternatives), name) } return &alternatives[0], nil } -type AlertsPreviewInterface interface { +type AlertsLegacyInterface interface { // Create an alert. // - // Creates an alert. - Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) + // Creates an alert. An alert is a Databricks SQL object that periodically runs + // a query, evaluates a condition of its result, and notifies users or + // notification destinations if the condition was met. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/create instead. 
[Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) // Delete an alert. // - // Moves an alert to the trash. Trashed alerts immediately disappear from - // searches and list views, and can no longer trigger. You can restore a trashed - // alert through the UI. A trashed alert is permanently deleted after 30 days. - Delete(ctx context.Context, request TrashAlertRequest) error + // Deletes an alert. Deleted alerts are no longer accessible and cannot be + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + // the trash. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error // Delete an alert. // - // Moves an alert to the trash. Trashed alerts immediately disappear from - // searches and list views, and can no longer trigger. You can restore a trashed - // alert through the UI. A trashed alert is permanently deleted after 30 days. - DeleteById(ctx context.Context, id string) error + // Deletes an alert. Deleted alerts are no longer accessible and cannot be + // restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + // the trash. + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/delete instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + DeleteByAlertId(ctx context.Context, alertId string) error // Get an alert. // // Gets an alert. - Get(ctx context.Context, request GetAlertRequest) (*Alert, error) + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/get instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) // Get an alert. // // Gets an alert. - GetById(ctx context.Context, id string) (*Alert, error) - - // List alerts. // - // Gets a list of alerts accessible to the user, ordered by creation time. - // **Warning:** Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban. + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/get instead. [Learn more] // - // This method is generated by Databricks SDK Code Generator. - List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) - // List alerts. + // Get alerts. // - // Gets a list of alerts accessible to the user, ordered by creation time. - // **Warning:** Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban. + // Gets a list of alerts. // - // This method is generated by Databricks SDK Code Generator. - ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/list instead. 
[Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + List(ctx context.Context) ([]LegacyAlert, error) - // ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsPreviewAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. + // LegacyAlertNameToIdMap calls [AlertsLegacyAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. // - // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [LegacyAlert] with the same .Name. // - // Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. + // Note: All [LegacyAlert] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. - ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) + LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) - // GetByDisplayName calls [AlertsPreviewAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. + // GetByName calls [AlertsLegacyAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. // - // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [LegacyAlert] with the same .Name. // - // Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. + // Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. - GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) + GetByName(ctx context.Context, name string) (*LegacyAlert, error) // Update an alert. // // Updates an alert. - Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) + // + // **Note**: A new version of the Databricks SQL API is now available. Please + // use :method:alerts/update instead. [Learn more] + // + // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + Update(ctx context.Context, request EditAlert) error } -func NewAlertsPreview(client *client.DatabricksClient) *AlertsPreviewAPI { - return &AlertsPreviewAPI{ - alertsPreviewImpl: alertsPreviewImpl{ +func NewAlertsLegacy(client *client.DatabricksClient) *AlertsLegacyAPI { + return &AlertsLegacyAPI{ + alertsLegacyImpl: alertsLegacyImpl{ client: client, }, } @@ -300,84 +285,99 @@ func NewAlertsPreview(client *client.DatabricksClient) *AlertsPreviewAPI { // of its result, and notifies one or more users and/or notification // destinations if the condition was met. Alerts can be scheduled using the // `sql_task` type of the Jobs API, e.g. :method:jobs/create. -type AlertsPreviewAPI struct { - alertsPreviewImpl +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// see the latest version. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +type AlertsLegacyAPI struct { + alertsLegacyImpl } // Delete an alert. // -// Moves an alert to the trash. Trashed alerts immediately disappear from -// searches and list views, and can no longer trigger. You can restore a trashed -// alert through the UI. A trashed alert is permanently deleted after 30 days. 
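(Illustration, not part of the patch: after the rename, AlertsAPI keeps the trash semantics described above, while AlertsLegacyAPI keeps the permanent delete. DeleteById's signature is verbatim from this file; the sqlpreview import path is an assumption.)

	import sqlpreview "github.com/databricks/databricks-sdk-go/sql/v2preview"

	func trashAlert(ctx context.Context, a sqlpreview.AlertsInterface, id string) error {
		// Reversible from the UI for 30 days, unlike the legacy delete.
		return a.DeleteById(ctx, id)
	}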
-func (a *AlertsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.alertsPreviewImpl.Delete(ctx, TrashAlertRequest{ - Id: id, +// Deletes an alert. Deleted alerts are no longer accessible and cannot be +// restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to +// the trash. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:alerts/delete instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *AlertsLegacyAPI) DeleteByAlertId(ctx context.Context, alertId string) error { + return a.alertsLegacyImpl.Delete(ctx, DeleteAlertsLegacyRequest{ + AlertId: alertId, }) } // Get an alert. // // Gets an alert. -func (a *AlertsPreviewAPI) GetById(ctx context.Context, id string) (*Alert, error) { - return a.alertsPreviewImpl.Get(ctx, GetAlertRequest{ - Id: id, +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:alerts/get instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *AlertsLegacyAPI) GetByAlertId(ctx context.Context, alertId string) (*LegacyAlert, error) { + return a.alertsLegacyImpl.Get(ctx, GetAlertsLegacyRequest{ + AlertId: alertId, }) } -// ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsPreviewAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. +// LegacyAlertNameToIdMap calls [AlertsLegacyAPI.List] and creates a map of results with [LegacyAlert].Name as key and [LegacyAlert].Id as value. // -// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [LegacyAlert] with the same .Name. // -// Note: All [ListAlertsResponseAlert] instances are loaded into memory before creating a map. +// Note: All [LegacyAlert] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *AlertsPreviewAPI) ListAlertsResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsRequest) (map[string]string, error) { +func (a *AlertsLegacyAPI) LegacyAlertNameToIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} - result, err := a.ListAll(ctx, request) + result, err := a.List(ctx) if err != nil { return nil, err } for _, v := range result { - key := v.DisplayName + key := v.Name _, duplicate := mapping[key] if duplicate { - return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + return nil, fmt.Errorf("duplicate .Name: %s", key) } mapping[key] = v.Id } return mapping, nil } -// GetByDisplayName calls [AlertsPreviewAPI.ListAlertsResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsResponseAlert]. +// GetByName calls [AlertsLegacyAPI.LegacyAlertNameToIdMap] and returns a single [LegacyAlert]. // -// Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [LegacyAlert] with the same .Name. // -// Note: All [ListAlertsResponseAlert] instances are loaded into memory before returning matching by name. +// Note: All [LegacyAlert] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
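(Illustration, not part of the patch: resolving an alert ID by display name with the generated helper. As the comments above note, this loads every alert into memory and errors when display names collide.)

	func alertIdByDisplayName(ctx context.Context, a sqlpreview.AlertsInterface, displayName string) (string, error) {
		alert, err := a.GetByDisplayName(ctx, displayName)
		if err != nil {
			return "", err // not found, or the name is ambiguous
		}
		return alert.Id, nil
	}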
-func (a *AlertsPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ListAlertsResponseAlert, error) { +func (a *AlertsLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyAlert, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") - result, err := a.ListAll(ctx, ListAlertsRequest{}) + result, err := a.List(ctx) if err != nil { return nil, err } - tmp := map[string][]ListAlertsResponseAlert{} + tmp := map[string][]LegacyAlert{} for _, v := range result { - key := v.DisplayName + key := v.Name tmp[key] = append(tmp[key], v) } alternatives, ok := tmp[name] if !ok || len(alternatives) == 0 { - return nil, fmt.Errorf("ListAlertsResponseAlert named '%s' does not exist", name) + return nil, fmt.Errorf("LegacyAlert named '%s' does not exist", name) } if len(alternatives) > 1 { - return nil, fmt.Errorf("there are %d instances of ListAlertsResponseAlert named '%s'", len(alternatives), name) + return nil, fmt.Errorf("there are %d instances of LegacyAlert named '%s'", len(alternatives), name) } return &alternatives[0], nil } -type DashboardWidgetsPreviewInterface interface { +type DashboardWidgetsInterface interface { // Add widget to a dashboard. Create(ctx context.Context, request CreateWidget) (*Widget, error) @@ -392,9 +392,9 @@ type DashboardWidgetsPreviewInterface interface { Update(ctx context.Context, request CreateWidget) (*Widget, error) } -func NewDashboardWidgetsPreview(client *client.DatabricksClient) *DashboardWidgetsPreviewAPI { - return &DashboardWidgetsPreviewAPI{ - dashboardWidgetsPreviewImpl: dashboardWidgetsPreviewImpl{ +func NewDashboardWidgets(client *client.DatabricksClient) *DashboardWidgetsAPI { + return &DashboardWidgetsAPI{ + dashboardWidgetsImpl: dashboardWidgetsImpl{ client: client, }, } @@ -403,18 +403,18 @@ func NewDashboardWidgetsPreview(client *client.DatabricksClient) *DashboardWidge // This is an evolving API that facilitates the addition and removal of widgets // from existing dashboards within the Databricks Workspace. Data structures may // change over time. -type DashboardWidgetsPreviewAPI struct { - dashboardWidgetsPreviewImpl +type DashboardWidgetsAPI struct { + dashboardWidgetsImpl } // Remove widget. -func (a *DashboardWidgetsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.dashboardWidgetsPreviewImpl.Delete(ctx, DeleteDashboardWidgetRequest{ +func (a *DashboardWidgetsAPI) DeleteById(ctx context.Context, id string) error { + return a.dashboardWidgetsImpl.Delete(ctx, DeleteDashboardWidgetRequest{ Id: id, }) } -type DashboardsPreviewInterface interface { +type DashboardsInterface interface { // Create a dashboard object. Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) @@ -463,7 +463,7 @@ type DashboardsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) - // DashboardNameToIdMap calls [DashboardsPreviewAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. + // DashboardNameToIdMap calls [DashboardsAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. // // Returns an error if there's more than one [Dashboard] with the same .Name. // @@ -472,7 +472,7 @@ type DashboardsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. 
DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error) - // GetByName calls [DashboardsPreviewAPI.DashboardNameToIdMap] and returns a single [Dashboard]. + // GetByName calls [DashboardsAPI.DashboardNameToIdMap] and returns a single [Dashboard]. // // Returns an error if there's more than one [Dashboard] with the same .Name. // @@ -495,9 +495,9 @@ type DashboardsPreviewInterface interface { Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error) } -func NewDashboardsPreview(client *client.DatabricksClient) *DashboardsPreviewAPI { - return &DashboardsPreviewAPI{ - dashboardsPreviewImpl: dashboardsPreviewImpl{ +func NewDashboards(client *client.DatabricksClient) *DashboardsAPI { + return &DashboardsAPI{ + dashboardsImpl: dashboardsImpl{ client: client, }, } @@ -509,16 +509,16 @@ func NewDashboardsPreview(client *client.DatabricksClient) *DashboardsPreviewAPI // since you can get a dashboard definition with a GET request and then POST it // to create a new one. Dashboards can be scheduled using the `sql_task` type of // the Jobs API, e.g. :method:jobs/create. -type DashboardsPreviewAPI struct { - dashboardsPreviewImpl +type DashboardsAPI struct { + dashboardsImpl } // Remove a dashboard. // // Moves a dashboard to the trash. Trashed dashboards do not appear in list // views or searches, and cannot be shared. -func (a *DashboardsPreviewAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error { - return a.dashboardsPreviewImpl.Delete(ctx, DeleteDashboardRequest{ +func (a *DashboardsAPI) DeleteByDashboardId(ctx context.Context, dashboardId string) error { + return a.dashboardsImpl.Delete(ctx, DeleteDashboardRequest{ DashboardId: dashboardId, }) } @@ -527,20 +527,20 @@ func (a *DashboardsPreviewAPI) DeleteByDashboardId(ctx context.Context, dashboar // // Returns a JSON representation of a dashboard object, including its // visualization and query objects. -func (a *DashboardsPreviewAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { - return a.dashboardsPreviewImpl.Get(ctx, GetDashboardRequest{ +func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string) (*Dashboard, error) { + return a.dashboardsImpl.Get(ctx, GetDashboardRequest{ DashboardId: dashboardId, }) } -// DashboardNameToIdMap calls [DashboardsPreviewAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. +// DashboardNameToIdMap calls [DashboardsAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. // // Returns an error if there's more than one [Dashboard] with the same .Name. // // Note: All [Dashboard] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *DashboardsPreviewAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error) { +func (a *DashboardsAPI) DashboardNameToIdMap(ctx context.Context, request ListDashboardsRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -558,14 +558,14 @@ func (a *DashboardsPreviewAPI) DashboardNameToIdMap(ctx context.Context, request return mapping, nil } -// GetByName calls [DashboardsPreviewAPI.DashboardNameToIdMap] and returns a single [Dashboard]. 
+// GetByName calls [DashboardsAPI.DashboardNameToIdMap] and returns a single [Dashboard]. // // Returns an error if there's more than one [Dashboard] with the same .Name. // // Note: All [Dashboard] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *DashboardsPreviewAPI) GetByName(ctx context.Context, name string) (*Dashboard, error) { +func (a *DashboardsAPI) GetByName(ctx context.Context, name string) (*Dashboard, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListDashboardsRequest{}) if err != nil { @@ -586,7 +586,7 @@ func (a *DashboardsPreviewAPI) GetByName(ctx context.Context, name string) (*Das return &alternatives[0], nil } -type DataSourcesPreviewInterface interface { +type DataSourcesInterface interface { // Get a list of SQL warehouses. // @@ -600,7 +600,7 @@ type DataSourcesPreviewInterface interface { // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html List(ctx context.Context) ([]DataSource, error) - // DataSourceNameToIdMap calls [DataSourcesPreviewAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. + // DataSourceNameToIdMap calls [DataSourcesAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. // // Returns an error if there's more than one [DataSource] with the same .Name. // @@ -609,7 +609,7 @@ type DataSourcesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. DataSourceNameToIdMap(ctx context.Context) (map[string]string, error) - // GetByName calls [DataSourcesPreviewAPI.DataSourceNameToIdMap] and returns a single [DataSource]. + // GetByName calls [DataSourcesAPI.DataSourceNameToIdMap] and returns a single [DataSource]. // // Returns an error if there's more than one [DataSource] with the same .Name. // @@ -619,9 +619,9 @@ type DataSourcesPreviewInterface interface { GetByName(ctx context.Context, name string) (*DataSource, error) } -func NewDataSourcesPreview(client *client.DatabricksClient) *DataSourcesPreviewAPI { - return &DataSourcesPreviewAPI{ - dataSourcesPreviewImpl: dataSourcesPreviewImpl{ +func NewDataSources(client *client.DatabricksClient) *DataSourcesAPI { + return &DataSourcesAPI{ + dataSourcesImpl: dataSourcesImpl{ client: client, }, } @@ -642,18 +642,18 @@ func NewDataSourcesPreview(client *client.DatabricksClient) *DataSourcesPreviewA // more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -type DataSourcesPreviewAPI struct { - dataSourcesPreviewImpl +type DataSourcesAPI struct { + dataSourcesImpl } -// DataSourceNameToIdMap calls [DataSourcesPreviewAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. +// DataSourceNameToIdMap calls [DataSourcesAPI.List] and creates a map of results with [DataSource].Name as key and [DataSource].Id as value. // // Returns an error if there's more than one [DataSource] with the same .Name. // // Note: All [DataSource] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
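(Illustration, not part of the patch: the same name-to-id pattern on the deprecated Data Sources API; [DataSource].Id is exactly what the mapping above stores.)

	func dataSourceIdByName(ctx context.Context, ds sqlpreview.DataSourcesInterface, name string) (string, error) {
		d, err := ds.GetByName(ctx, name)
		if err != nil {
			return "", err
		}
		return d.Id, nil
	}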
-func (a *DataSourcesPreviewAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error) { +func (a *DataSourcesAPI) DataSourceNameToIdMap(ctx context.Context) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.List(ctx) @@ -671,14 +671,14 @@ func (a *DataSourcesPreviewAPI) DataSourceNameToIdMap(ctx context.Context) (map[ return mapping, nil } -// GetByName calls [DataSourcesPreviewAPI.DataSourceNameToIdMap] and returns a single [DataSource]. +// GetByName calls [DataSourcesAPI.DataSourceNameToIdMap] and returns a single [DataSource]. // // Returns an error if there's more than one [DataSource] with the same .Name. // // Note: All [DataSource] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *DataSourcesPreviewAPI) GetByName(ctx context.Context, name string) (*DataSource, error) { +func (a *DataSourcesAPI) GetByName(ctx context.Context, name string) (*DataSource, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.List(ctx) if err != nil { @@ -699,7 +699,7 @@ func (a *DataSourcesPreviewAPI) GetByName(ctx context.Context, name string) (*Da return &alternatives[0], nil } -type DbsqlPermissionsPreviewInterface interface { +type DbsqlPermissionsInterface interface { // Get object ACL. // @@ -747,9 +747,9 @@ type DbsqlPermissionsPreviewInterface interface { TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) } -func NewDbsqlPermissionsPreview(client *client.DatabricksClient) *DbsqlPermissionsPreviewAPI { - return &DbsqlPermissionsPreviewAPI{ - dbsqlPermissionsPreviewImpl: dbsqlPermissionsPreviewImpl{ +func NewDbsqlPermissions(client *client.DatabricksClient) *DbsqlPermissionsAPI { + return &DbsqlPermissionsAPI{ + dbsqlPermissionsImpl: dbsqlPermissionsImpl{ client: client, }, } @@ -773,27 +773,220 @@ func NewDbsqlPermissionsPreview(client *client.DatabricksClient) *DbsqlPermissio // more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -type DbsqlPermissionsPreviewAPI struct { - dbsqlPermissionsPreviewImpl +type DbsqlPermissionsAPI struct { + dbsqlPermissionsImpl +} + +// Get object ACL. +// +// Gets a JSON representation of the access control list (ACL) for a specified +// object. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:workspace/getpermissions instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *DbsqlPermissionsAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error) { + return a.dbsqlPermissionsImpl.Get(ctx, GetDbsqlPermissionRequest{ + ObjectType: objectType, + ObjectId: objectId, + }) +} + +type QueriesInterface interface { + + // Create a query. + // + // Creates a query. + Create(ctx context.Context, request CreateQueryRequest) (*Query, error) + + // Delete a query. + // + // Moves a query to the trash. Trashed queries immediately disappear from + // searches and list views, and cannot be used for alerts. You can restore a + // trashed query through the UI. A trashed query is permanently deleted after 30 + // days. + Delete(ctx context.Context, request TrashQueryRequest) error + + // Delete a query. + // + // Moves a query to the trash. 
Trashed queries immediately disappear from + // searches and list views, and cannot be used for alerts. You can restore a + // trashed query through the UI. A trashed query is permanently deleted after 30 + // days. + DeleteById(ctx context.Context, id string) error + + // Get a query. + // + // Gets a query. + Get(ctx context.Context, request GetQueryRequest) (*Query, error) + + // Get a query. + // + // Gets a query. + GetById(ctx context.Context, id string) (*Query, error) + + // List queries. + // + // Gets a list of queries accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] + + // List queries. + // + // Gets a list of queries accessible to the user, ordered by creation time. + // **Warning:** Calling this API concurrently 10 or more times could result in + // throttling, service degradation, or a temporary ban. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) + + // ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. + // + // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. + // + // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) + + // GetByDisplayName calls [QueriesAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. + // + // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. + // + // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + // + // This method is generated by Databricks SDK Code Generator. + ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + // + // This method is generated by Databricks SDK Code Generator. + ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) + + // List visualizations on a query. + // + // Gets a list of visualizations on a query. + ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) + + // Update a query. + // + // Updates a query. 
+ Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) +} + +func NewQueries(client *client.DatabricksClient) *QueriesAPI { + return &QueriesAPI{ + queriesImpl: queriesImpl{ + client: client, + }, + } +} + +// The queries API can be used to perform CRUD operations on queries. A query is +// a Databricks SQL object that includes the target SQL warehouse, query text, +// name, description, tags, and parameters. Queries can be scheduled using the +// `sql_task` type of the Jobs API, e.g. :method:jobs/create. +type QueriesAPI struct { + queriesImpl +} + +// Delete a query. +// +// Moves a query to the trash. Trashed queries immediately disappear from +// searches and list views, and cannot be used for alerts. You can restore a +// trashed query through the UI. A trashed query is permanently deleted after 30 +// days. +func (a *QueriesAPI) DeleteById(ctx context.Context, id string) error { + return a.queriesImpl.Delete(ctx, TrashQueryRequest{ + Id: id, + }) +} + +// Get a query. +// +// Gets a query. +func (a *QueriesAPI) GetById(ctx context.Context, id string) (*Query, error) { + return a.queriesImpl.Get(ctx, GetQueryRequest{ + Id: id, + }) +} + +// ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. +// +// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. +// +// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *QueriesAPI) ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [QueriesAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. +// +// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. +// +// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *QueriesAPI) GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAll(ctx, ListQueriesRequest{}) + if err != nil { + return nil, err + } + tmp := map[string][]ListQueryObjectsResponseQuery{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ListQueryObjectsResponseQuery named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ListQueryObjectsResponseQuery named '%s'", len(alternatives), name) + } + return &alternatives[0], nil } -// Get object ACL. -// -// Gets a JSON representation of the access control list (ACL) for a specified -// object. 
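A sketch of the new Queries client in use, chaining the GetByDisplayName helper into DeleteById; the import path and the display name are assumptions, and note that GetByDisplayName loads every query into memory before matching:

    package main

    import (
    	"context"
    	"log"

    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	q, err := sql.NewQueriesClient(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	found, err := q.GetByDisplayName(ctx, "Daily revenue") // hypothetical display name
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Moves the query to the trash; it is permanently deleted after 30 days.
    	if err := q.DeleteById(ctx, found.Id); err != nil {
    		log.Fatal(err)
    	}
    }
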
-// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:workspace/getpermissions instead. [Learn more] +// List visualizations on a query. // -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *DbsqlPermissionsPreviewAPI) GetByObjectTypeAndObjectId(ctx context.Context, objectType ObjectTypePlural, objectId string) (*GetResponse, error) { - return a.dbsqlPermissionsPreviewImpl.Get(ctx, GetDbsqlPermissionRequest{ - ObjectType: objectType, - ObjectId: objectId, +// Gets a list of visualizations on a query. +func (a *QueriesAPI) ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) { + return a.queriesImpl.internalListVisualizations(ctx, ListVisualizationsForQueryRequest{ + Id: id, }) } -type QueriesLegacyPreviewInterface interface { +type QueriesLegacyInterface interface { // Create a new query definition. // @@ -891,7 +1084,7 @@ type QueriesLegacyPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) - // LegacyQueryNameToIdMap calls [QueriesLegacyPreviewAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. + // LegacyQueryNameToIdMap calls [QueriesLegacyAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. // // Returns an error if there's more than one [LegacyQuery] with the same .Name. // @@ -900,7 +1093,7 @@ type QueriesLegacyPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error) - // GetByName calls [QueriesLegacyPreviewAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. + // GetByName calls [QueriesLegacyAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. // // Returns an error if there's more than one [LegacyQuery] with the same .Name. // @@ -933,9 +1126,9 @@ type QueriesLegacyPreviewInterface interface { Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) } -func NewQueriesLegacyPreview(client *client.DatabricksClient) *QueriesLegacyPreviewAPI { - return &QueriesLegacyPreviewAPI{ - queriesLegacyPreviewImpl: queriesLegacyPreviewImpl{ +func NewQueriesLegacy(client *client.DatabricksClient) *QueriesLegacyAPI { + return &QueriesLegacyAPI{ + queriesLegacyImpl: queriesLegacyImpl{ client: client, }, } @@ -950,8 +1143,8 @@ func NewQueriesLegacyPreview(client *client.DatabricksClient) *QueriesLegacyPrev // see the latest version. [Learn more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -type QueriesLegacyPreviewAPI struct { - queriesLegacyPreviewImpl +type QueriesLegacyAPI struct { + queriesLegacyImpl } // Delete a query. @@ -964,8 +1157,8 @@ type QueriesLegacyPreviewAPI struct { // use :method:queries/delete instead. 
[Learn more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *QueriesLegacyPreviewAPI) DeleteByQueryId(ctx context.Context, queryId string) error { - return a.queriesLegacyPreviewImpl.Delete(ctx, DeleteQueriesLegacyRequest{ +func (a *QueriesLegacyAPI) DeleteByQueryId(ctx context.Context, queryId string) error { + return a.queriesLegacyImpl.Delete(ctx, DeleteQueriesLegacyRequest{ QueryId: queryId, }) } @@ -979,20 +1172,20 @@ func (a *QueriesLegacyPreviewAPI) DeleteByQueryId(ctx context.Context, queryId s // use :method:queries/get instead. [Learn more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *QueriesLegacyPreviewAPI) GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error) { - return a.queriesLegacyPreviewImpl.Get(ctx, GetQueriesLegacyRequest{ +func (a *QueriesLegacyAPI) GetByQueryId(ctx context.Context, queryId string) (*LegacyQuery, error) { + return a.queriesLegacyImpl.Get(ctx, GetQueriesLegacyRequest{ QueryId: queryId, }) } -// LegacyQueryNameToIdMap calls [QueriesLegacyPreviewAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. +// LegacyQueryNameToIdMap calls [QueriesLegacyAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. // // Returns an error if there's more than one [LegacyQuery] with the same .Name. // // Note: All [LegacyQuery] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *QueriesLegacyPreviewAPI) LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error) { +func (a *QueriesLegacyAPI) LegacyQueryNameToIdMap(ctx context.Context, request ListQueriesLegacyRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1010,14 +1203,14 @@ func (a *QueriesLegacyPreviewAPI) LegacyQueryNameToIdMap(ctx context.Context, re return mapping, nil } -// GetByName calls [QueriesLegacyPreviewAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. +// GetByName calls [QueriesLegacyAPI.LegacyQueryNameToIdMap] and returns a single [LegacyQuery]. // // Returns an error if there's more than one [LegacyQuery] with the same .Name. // // Note: All [LegacyQuery] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *QueriesLegacyPreviewAPI) GetByName(ctx context.Context, name string) (*LegacyQuery, error) { +func (a *QueriesLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyQuery, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListQueriesLegacyRequest{}) if err != nil { @@ -1038,227 +1231,81 @@ func (a *QueriesLegacyPreviewAPI) GetByName(ctx context.Context, name string) (* return &alternatives[0], nil } -type QueriesPreviewInterface interface { - - // Create a query. - // - // Creates a query. - Create(ctx context.Context, request CreateQueryRequest) (*Query, error) - - // Delete a query. - // - // Moves a query to the trash. Trashed queries immediately disappear from - // searches and list views, and cannot be used for alerts. You can restore a - // trashed query through the UI. A trashed query is permanently deleted after 30 - // days. 
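For code still on the legacy surface, the generated name-to-id helper gives a one-call inventory. A sketch, assuming the same import path, that materializes the map; every LegacyQuery is loaded into memory first, and a duplicate name is an error:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	ql, err := sql.NewQueriesLegacyClient(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	ids, err := ql.LegacyQueryNameToIdMap(ctx, sql.ListQueriesLegacyRequest{})
    	if err != nil {
    		log.Fatal(err) // includes the duplicate-name case
    	}
    	for name, id := range ids {
    		fmt.Printf("%s -> %s\n", name, id)
    	}
    }
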
- Delete(ctx context.Context, request TrashQueryRequest) error - - // Delete a query. - // - // Moves a query to the trash. Trashed queries immediately disappear from - // searches and list views, and cannot be used for alerts. You can restore a - // trashed query through the UI. A trashed query is permanently deleted after 30 - // days. - DeleteById(ctx context.Context, id string) error - - // Get a query. - // - // Gets a query. - Get(ctx context.Context, request GetQueryRequest) (*Query, error) - - // Get a query. - // - // Gets a query. - GetById(ctx context.Context, id string) (*Query, error) +type QueryHistoryInterface interface { - // List queries. + // List Queries. // - // Gets a list of queries accessible to the user, ordered by creation time. - // **Warning:** Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban. + // List the history of queries through SQL warehouses, and serverless compute. // - // This method is generated by Databricks SDK Code Generator. - List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] + // You can filter by user ID, warehouse ID, status, and time range. Most + // recently started queries are returned first (up to max_results in request). + // The pagination token returned in response can be used to list subsequent + // query statuses. + List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) +} - // List queries. - // - // Gets a list of queries accessible to the user, ordered by creation time. - // **Warning:** Calling this API concurrently 10 or more times could result in - // throttling, service degradation, or a temporary ban. - // - // This method is generated by Databricks SDK Code Generator. - ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) +func NewQueryHistory(client *client.DatabricksClient) *QueryHistoryAPI { + return &QueryHistoryAPI{ + queryHistoryImpl: queryHistoryImpl{ + client: client, + }, + } +} - // ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesPreviewAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. - // - // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. - // - // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. - // - // This method is generated by Databricks SDK Code Generator. - ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) +// A service responsible for storing and retrieving the list of queries run +// against SQL endpoints and serverless compute. +type QueryHistoryAPI struct { + queryHistoryImpl +} - // GetByDisplayName calls [QueriesPreviewAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. - // - // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. - // - // Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. - // - // This method is generated by Databricks SDK Code Generator. - GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) +type QueryVisualizationsInterface interface { - // List visualizations on a query. 
- // - // Gets a list of visualizations on a query. + // Add a visualization to a query. // - // This method is generated by Databricks SDK Code Generator. - ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] + // Adds a visualization to a query. + Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) - // List visualizations on a query. - // - // Gets a list of visualizations on a query. + // Remove a visualization. // - // This method is generated by Databricks SDK Code Generator. - ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) + // Removes a visualization. + Delete(ctx context.Context, request DeleteVisualizationRequest) error - // List visualizations on a query. + // Remove a visualization. // - // Gets a list of visualizations on a query. - ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) + // Removes a visualization. + DeleteById(ctx context.Context, id string) error - // Update a query. + // Update a visualization. // - // Updates a query. - Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) + // Updates a visualization. + Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) } -func NewQueriesPreview(client *client.DatabricksClient) *QueriesPreviewAPI { - return &QueriesPreviewAPI{ - queriesPreviewImpl: queriesPreviewImpl{ +func NewQueryVisualizations(client *client.DatabricksClient) *QueryVisualizationsAPI { + return &QueryVisualizationsAPI{ + queryVisualizationsImpl: queryVisualizationsImpl{ client: client, }, } } -// The queries API can be used to perform CRUD operations on queries. A query is -// a Databricks SQL object that includes the target SQL warehouse, query text, -// name, description, tags, and parameters. Queries can be scheduled using the -// `sql_task` type of the Jobs API, e.g. :method:jobs/create. -type QueriesPreviewAPI struct { - queriesPreviewImpl -} - -// Delete a query. -// -// Moves a query to the trash. Trashed queries immediately disappear from -// searches and list views, and cannot be used for alerts. You can restore a -// trashed query through the UI. A trashed query is permanently deleted after 30 -// days. -func (a *QueriesPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.queriesPreviewImpl.Delete(ctx, TrashQueryRequest{ - Id: id, - }) -} - -// Get a query. -// -// Gets a query. -func (a *QueriesPreviewAPI) GetById(ctx context.Context, id string) (*Query, error) { - return a.queriesPreviewImpl.Get(ctx, GetQueryRequest{ - Id: id, - }) -} - -// ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesPreviewAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. -// -// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. -// -// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before creating a map. -// -// This method is generated by Databricks SDK Code Generator. 
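Unlike the other services in this file, query history is read-only and returns one page per call rather than a listing iterator. A sketch, assuming the zero-value request is accepted, that fetches the most recent page and prints it generically rather than guessing at the response's field names:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	qh, err := sql.NewQueryHistoryClient(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Most recently started queries come back first; the response carries
    	// a pagination token for fetching subsequent pages.
    	resp, err := qh.List(ctx, sql.ListQueryHistoryRequest{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%+v\n", resp)
    }
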
-func (a *QueriesPreviewAPI) ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx context.Context, request ListQueriesRequest) (map[string]string, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") - mapping := map[string]string{} - result, err := a.ListAll(ctx, request) - if err != nil { - return nil, err - } - for _, v := range result { - key := v.DisplayName - _, duplicate := mapping[key] - if duplicate { - return nil, fmt.Errorf("duplicate .DisplayName: %s", key) - } - mapping[key] = v.Id - } - return mapping, nil -} - -// GetByDisplayName calls [QueriesPreviewAPI.ListQueryObjectsResponseQueryDisplayNameToIdMap] and returns a single [ListQueryObjectsResponseQuery]. -// -// Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. -// -// Note: All [ListQueryObjectsResponseQuery] instances are loaded into memory before returning matching by name. -// -// This method is generated by Databricks SDK Code Generator. -func (a *QueriesPreviewAPI) GetByDisplayName(ctx context.Context, name string) (*ListQueryObjectsResponseQuery, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") - result, err := a.ListAll(ctx, ListQueriesRequest{}) - if err != nil { - return nil, err - } - tmp := map[string][]ListQueryObjectsResponseQuery{} - for _, v := range result { - key := v.DisplayName - tmp[key] = append(tmp[key], v) - } - alternatives, ok := tmp[name] - if !ok || len(alternatives) == 0 { - return nil, fmt.Errorf("ListQueryObjectsResponseQuery named '%s' does not exist", name) - } - if len(alternatives) > 1 { - return nil, fmt.Errorf("there are %d instances of ListQueryObjectsResponseQuery named '%s'", len(alternatives), name) - } - return &alternatives[0], nil +// This is an evolving API that facilitates the addition and removal of +// visualizations from existing queries in the Databricks Workspace. Data +// structures can change over time. +type QueryVisualizationsAPI struct { + queryVisualizationsImpl } -// List visualizations on a query. +// Remove a visualization. // -// Gets a list of visualizations on a query. -func (a *QueriesPreviewAPI) ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) { - return a.queriesPreviewImpl.internalListVisualizations(ctx, ListVisualizationsForQueryRequest{ +// Removes a visualization. +func (a *QueryVisualizationsAPI) DeleteById(ctx context.Context, id string) error { + return a.queryVisualizationsImpl.Delete(ctx, DeleteVisualizationRequest{ Id: id, }) } -type QueryHistoryPreviewInterface interface { - - // List Queries. - // - // List the history of queries through SQL warehouses, and serverless compute. - // - // You can filter by user ID, warehouse ID, status, and time range. Most - // recently started queries are returned first (up to max_results in request). - // The pagination token returned in response can be used to list subsequent - // query statuses. - List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) -} - -func NewQueryHistoryPreview(client *client.DatabricksClient) *QueryHistoryPreviewAPI { - return &QueryHistoryPreviewAPI{ - queryHistoryPreviewImpl: queryHistoryPreviewImpl{ - client: client, - }, - } -} - -// A service responsible for storing and retrieving the list of queries run -// against SQL endpoints and serverless compute. 
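The by-id convenience wrapper shown above only packs the id into the request struct before delegating. A sketch of deleting a visualization by id; the import path and the id are assumptions:

    package main

    import (
    	"context"
    	"log"

    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	qv, err := sql.NewQueryVisualizationsClient(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Equivalent to Delete(ctx, DeleteVisualizationRequest{Id: ...}).
    	if err := qv.DeleteById(ctx, "hypothetical-visualization-id"); err != nil {
    		log.Fatal(err)
    	}
    }
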
-type QueryHistoryPreviewAPI struct { - queryHistoryPreviewImpl -} - -type QueryVisualizationsLegacyPreviewInterface interface { +type QueryVisualizationsLegacyInterface interface { // Add visualization to a query. // @@ -1301,9 +1348,9 @@ type QueryVisualizationsLegacyPreviewInterface interface { Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error) } -func NewQueryVisualizationsLegacyPreview(client *client.DatabricksClient) *QueryVisualizationsLegacyPreviewAPI { - return &QueryVisualizationsLegacyPreviewAPI{ - queryVisualizationsLegacyPreviewImpl: queryVisualizationsLegacyPreviewImpl{ +func NewQueryVisualizationsLegacy(client *client.DatabricksClient) *QueryVisualizationsLegacyAPI { + return &QueryVisualizationsLegacyAPI{ + queryVisualizationsLegacyImpl: queryVisualizationsLegacyImpl{ client: client, }, } @@ -1317,8 +1364,8 @@ func NewQueryVisualizationsLegacyPreview(client *client.DatabricksClient) *Query // see the latest version. [Learn more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -type QueryVisualizationsLegacyPreviewAPI struct { - queryVisualizationsLegacyPreviewImpl +type QueryVisualizationsLegacyAPI struct { + queryVisualizationsLegacyImpl } // Remove visualization. @@ -1329,79 +1376,33 @@ type QueryVisualizationsLegacyPreviewAPI struct { // use :method:queryvisualizations/delete instead. [Learn more] // // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *QueryVisualizationsLegacyPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.queryVisualizationsLegacyPreviewImpl.Delete(ctx, DeleteQueryVisualizationsLegacyRequest{ - Id: id, - }) -} - -type QueryVisualizationsPreviewInterface interface { - - // Add a visualization to a query. - // - // Adds a visualization to a query. - Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) - - // Remove a visualization. - // - // Removes a visualization. - Delete(ctx context.Context, request DeleteVisualizationRequest) error - - // Remove a visualization. - // - // Removes a visualization. - DeleteById(ctx context.Context, id string) error - - // Update a visualization. - // - // Updates a visualization. - Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) -} - -func NewQueryVisualizationsPreview(client *client.DatabricksClient) *QueryVisualizationsPreviewAPI { - return &QueryVisualizationsPreviewAPI{ - queryVisualizationsPreviewImpl: queryVisualizationsPreviewImpl{ - client: client, - }, - } -} - -// This is an evolving API that facilitates the addition and removal of -// visualizations from existing queries in the Databricks Workspace. Data -// structures can change over time. -type QueryVisualizationsPreviewAPI struct { - queryVisualizationsPreviewImpl -} - -// Remove a visualization. -// -// Removes a visualization. -func (a *QueryVisualizationsPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.queryVisualizationsPreviewImpl.Delete(ctx, DeleteVisualizationRequest{ +func (a *QueryVisualizationsLegacyAPI) DeleteById(ctx context.Context, id string) error { + return a.queryVisualizationsLegacyImpl.Delete(ctx, DeleteQueryVisualizationsLegacyRequest{ Id: id, }) } -type RedashConfigPreviewInterface interface { +type RedashConfigInterface interface { // Read workspace configuration for Redash-v2. 
GetConfig(ctx context.Context) (*ClientConfig, error) } -func NewRedashConfigPreview(client *client.DatabricksClient) *RedashConfigPreviewAPI { - return &RedashConfigPreviewAPI{ - redashConfigPreviewImpl: redashConfigPreviewImpl{ +func NewRedashConfig(client *client.DatabricksClient) *RedashConfigAPI { + return &RedashConfigAPI{ + redashConfigImpl: redashConfigImpl{ client: client, }, } } // Redash V2 service for workspace configurations (internal) -type RedashConfigPreviewAPI struct { - redashConfigPreviewImpl +type RedashConfigAPI struct { + redashConfigImpl } -type StatementExecutionPreviewInterface interface { +type StatementExecutionInterface interface { + statementExecutionAPIUtilities // Cancel statement execution. // @@ -1463,9 +1464,9 @@ type StatementExecutionPreviewInterface interface { GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error) } -func NewStatementExecutionPreview(client *client.DatabricksClient) *StatementExecutionPreviewAPI { - return &StatementExecutionPreviewAPI{ - statementExecutionPreviewImpl: statementExecutionPreviewImpl{ +func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionAPI { + return &StatementExecutionAPI{ + statementExecutionImpl: statementExecutionImpl{ client: client, }, } @@ -1576,8 +1577,8 @@ func NewStatementExecutionPreview(client *client.DatabricksClient) *StatementExe // // [Apache Arrow Columnar]: https://arrow.apache.org/overview/ // [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html -type StatementExecutionPreviewAPI struct { - statementExecutionPreviewImpl +type StatementExecutionAPI struct { + statementExecutionImpl } // Get status, manifest, and result first chunk. @@ -1591,8 +1592,8 @@ type StatementExecutionPreviewAPI struct { // // **NOTE** This call currently might take up to 5 seconds to get the latest // status and result. -func (a *StatementExecutionPreviewAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error) { - return a.statementExecutionPreviewImpl.GetStatement(ctx, GetStatementRequest{ +func (a *StatementExecutionAPI) GetStatementByStatementId(ctx context.Context, statementId string) (*StatementResponse, error) { + return a.statementExecutionImpl.GetStatement(ctx, GetStatementRequest{ StatementId: statementId, }) } @@ -1607,14 +1608,14 @@ func (a *StatementExecutionPreviewAPI) GetStatementByStatementId(ctx context.Con // element described in the :method:statementexecution/getStatement request, and // similarly includes the `next_chunk_index` and `next_chunk_internal_link` // fields for simple iteration through the result set. -func (a *StatementExecutionPreviewAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error) { - return a.statementExecutionPreviewImpl.GetStatementResultChunkN(ctx, GetStatementResultChunkNRequest{ +func (a *StatementExecutionAPI) GetStatementResultChunkNByStatementIdAndChunkIndex(ctx context.Context, statementId string, chunkIndex int) (*ResultData, error) { + return a.statementExecutionImpl.GetStatementResultChunkN(ctx, GetStatementResultChunkNRequest{ StatementId: statementId, ChunkIndex: chunkIndex, }) } -type WarehousesPreviewInterface interface { +type WarehousesInterface interface { // Create a warehouse. 
// @@ -1688,7 +1689,7 @@ type WarehousesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) - // EndpointInfoNameToIdMap calls [WarehousesPreviewAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. + // EndpointInfoNameToIdMap calls [WarehousesAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. // // Returns an error if there's more than one [EndpointInfo] with the same .Name. // @@ -1697,7 +1698,7 @@ type WarehousesPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error) - // GetByName calls [WarehousesPreviewAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. + // GetByName calls [WarehousesAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. // // Returns an error if there's more than one [EndpointInfo] with the same .Name. // @@ -1736,9 +1737,9 @@ type WarehousesPreviewInterface interface { UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) } -func NewWarehousesPreview(client *client.DatabricksClient) *WarehousesPreviewAPI { - return &WarehousesPreviewAPI{ - warehousesPreviewImpl: warehousesPreviewImpl{ +func NewWarehouses(client *client.DatabricksClient) *WarehousesAPI { + return &WarehousesAPI{ + warehousesImpl: warehousesImpl{ client: client, }, } @@ -1747,15 +1748,15 @@ func NewWarehousesPreview(client *client.DatabricksClient) *WarehousesPreviewAPI // A SQL warehouse is a compute resource that lets you run SQL commands on data // objects within Databricks SQL. Compute resources are infrastructure resources // that provide processing capabilities in the cloud. -type WarehousesPreviewAPI struct { - warehousesPreviewImpl +type WarehousesAPI struct { + warehousesImpl } // Delete a warehouse. // // Deletes a SQL warehouse. -func (a *WarehousesPreviewAPI) DeleteById(ctx context.Context, id string) error { - return a.warehousesPreviewImpl.Delete(ctx, DeleteWarehouseRequest{ +func (a *WarehousesAPI) DeleteById(ctx context.Context, id string) error { + return a.warehousesImpl.Delete(ctx, DeleteWarehouseRequest{ Id: id, }) } @@ -1763,8 +1764,8 @@ func (a *WarehousesPreviewAPI) DeleteById(ctx context.Context, id string) error // Get warehouse info. // // Gets the information for a single SQL warehouse. -func (a *WarehousesPreviewAPI) GetById(ctx context.Context, id string) (*GetWarehouseResponse, error) { - return a.warehousesPreviewImpl.Get(ctx, GetWarehouseRequest{ +func (a *WarehousesAPI) GetById(ctx context.Context, id string) (*GetWarehouseResponse, error) { + return a.warehousesImpl.Get(ctx, GetWarehouseRequest{ Id: id, }) } @@ -1772,8 +1773,8 @@ func (a *WarehousesPreviewAPI) GetById(ctx context.Context, id string) (*GetWare // Get SQL warehouse permission levels. // // Gets the permission levels that a user can have on an object. 
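A sketch of the two statement-execution by-id helpers against an already-submitted statement; the statement id is an assumption, and responses are printed generically rather than guessing at manifest field names:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	se, err := sql.NewStatementExecutionClient(nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	stmtID := "hypothetical-statement-id"
    	// May take up to 5 seconds to reflect the latest status and result.
    	stmt, err := se.GetStatementByStatementId(ctx, stmtID)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("statement: %+v\n", stmt)
    	// Chunk 0 is the first slice of the result set; next_chunk_index in the
    	// response drives iteration over any remaining chunks.
    	chunk, err := se.GetStatementResultChunkNByStatementIdAndChunkIndex(ctx, stmtID, 0)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("chunk 0: %+v\n", chunk)
    }
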
-func (a *WarehousesPreviewAPI) GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error) { - return a.warehousesPreviewImpl.GetPermissionLevels(ctx, GetWarehousePermissionLevelsRequest{ +func (a *WarehousesAPI) GetPermissionLevelsByWarehouseId(ctx context.Context, warehouseId string) (*GetWarehousePermissionLevelsResponse, error) { + return a.warehousesImpl.GetPermissionLevels(ctx, GetWarehousePermissionLevelsRequest{ WarehouseId: warehouseId, }) } @@ -1782,20 +1783,20 @@ func (a *WarehousesPreviewAPI) GetPermissionLevelsByWarehouseId(ctx context.Cont // // Gets the permissions of a SQL warehouse. SQL warehouses can inherit // permissions from their root object. -func (a *WarehousesPreviewAPI) GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error) { - return a.warehousesPreviewImpl.GetPermissions(ctx, GetWarehousePermissionsRequest{ +func (a *WarehousesAPI) GetPermissionsByWarehouseId(ctx context.Context, warehouseId string) (*WarehousePermissions, error) { + return a.warehousesImpl.GetPermissions(ctx, GetWarehousePermissionsRequest{ WarehouseId: warehouseId, }) } -// EndpointInfoNameToIdMap calls [WarehousesPreviewAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. +// EndpointInfoNameToIdMap calls [WarehousesAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. // // Returns an error if there's more than one [EndpointInfo] with the same .Name. // // Note: All [EndpointInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *WarehousesPreviewAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error) { +func (a *WarehousesAPI) EndpointInfoNameToIdMap(ctx context.Context, request ListWarehousesRequest) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAll(ctx, request) @@ -1813,14 +1814,14 @@ func (a *WarehousesPreviewAPI) EndpointInfoNameToIdMap(ctx context.Context, requ return mapping, nil } -// GetByName calls [WarehousesPreviewAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. +// GetByName calls [WarehousesAPI.EndpointInfoNameToIdMap] and returns a single [EndpointInfo]. // // Returns an error if there's more than one [EndpointInfo] with the same .Name. // // Note: All [EndpointInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
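A sketch of the warehouse name-to-id helper with an explicitly constructed config instead of environment lookup; the config import path, its Host and Token fields, and both values are assumptions:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/databricks/databricks-sdk-go/databricks/config" // assumed import path
    	sql "github.com/databricks/databricks-sdk-go/sql/v2preview" // assumed import path
    )

    func main() {
    	ctx := context.Background()
    	// Host and Token are assumed config fields; values are placeholders.
    	cfg := &config.Config{Host: "https://example.cloud.databricks.com", Token: "dapi-placeholder"}
    	w, err := sql.NewWarehousesClient(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	ids, err := w.EndpointInfoNameToIdMap(ctx, sql.ListWarehousesRequest{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	for name, id := range ids {
    		fmt.Printf("%s -> %s\n", name, id)
    	}
    }
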
-func (a *WarehousesPreviewAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error) { +func (a *WarehousesAPI) GetByName(ctx context.Context, name string) (*EndpointInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListWarehousesRequest{}) if err != nil { diff --git a/sql/v2preview/client.go b/sql/v2preview/client.go index ffc4e68df..6d8bfce33 100755 --- a/sql/v2preview/client.go +++ b/sql/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type AlertsLegacyPreviewClient struct { - AlertsLegacyPreviewInterface +type AlertsClient struct { + AlertsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAlertsLegacyPreviewClient(cfg *config.Config) (*AlertsLegacyPreviewClient, error) { +func NewAlertsClient(cfg *config.Config) (*AlertsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewAlertsLegacyPreviewClient(cfg *config.Config) (*AlertsLegacyPreviewClien return nil, err } - return &AlertsLegacyPreviewClient{ - Config: cfg, - apiClient: apiClient, - AlertsLegacyPreviewInterface: NewAlertsLegacyPreview(databricksClient), + return &AlertsClient{ + Config: cfg, + apiClient: apiClient, + AlertsInterface: NewAlerts(databricksClient), }, nil } -type AlertsPreviewClient struct { - AlertsPreviewInterface +type AlertsLegacyClient struct { + AlertsLegacyInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewAlertsPreviewClient(cfg *config.Config) (*AlertsPreviewClient, error) { +func NewAlertsLegacyClient(cfg *config.Config) (*AlertsLegacyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewAlertsPreviewClient(cfg *config.Config) (*AlertsPreviewClient, error) { return nil, err } - return &AlertsPreviewClient{ - Config: cfg, - apiClient: apiClient, - AlertsPreviewInterface: NewAlertsPreview(databricksClient), + return &AlertsLegacyClient{ + Config: cfg, + apiClient: apiClient, + AlertsLegacyInterface: NewAlertsLegacy(databricksClient), }, nil } -type DashboardWidgetsPreviewClient struct { - DashboardWidgetsPreviewInterface +type DashboardWidgetsClient struct { + DashboardWidgetsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDashboardWidgetsPreviewClient(cfg *config.Config) (*DashboardWidgetsPreviewClient, error) { +func NewDashboardWidgetsClient(cfg *config.Config) (*DashboardWidgetsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewDashboardWidgetsPreviewClient(cfg *config.Config) (*DashboardWidgetsPrev return nil, err } - return &DashboardWidgetsPreviewClient{ - Config: cfg, - apiClient: apiClient, - DashboardWidgetsPreviewInterface: NewDashboardWidgetsPreview(databricksClient), + return &DashboardWidgetsClient{ + Config: cfg, + apiClient: apiClient, + DashboardWidgetsInterface: NewDashboardWidgets(databricksClient), }, nil } -type DashboardsPreviewClient struct { - DashboardsPreviewInterface +type DashboardsClient struct { + DashboardsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDashboardsPreviewClient(cfg *config.Config) (*DashboardsPreviewClient, error) { +func NewDashboardsClient(cfg *config.Config) (*DashboardsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,20 +139,20 @@ func NewDashboardsPreviewClient(cfg *config.Config) (*DashboardsPreviewClient, e return nil, err } - return &DashboardsPreviewClient{ - Config: cfg, - apiClient: apiClient, - 
DashboardsPreviewInterface: NewDashboardsPreview(databricksClient), + return &DashboardsClient{ + Config: cfg, + apiClient: apiClient, + DashboardsInterface: NewDashboards(databricksClient), }, nil } -type DataSourcesPreviewClient struct { - DataSourcesPreviewInterface +type DataSourcesClient struct { + DataSourcesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDataSourcesPreviewClient(cfg *config.Config) (*DataSourcesPreviewClient, error) { +func NewDataSourcesClient(cfg *config.Config) (*DataSourcesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -173,20 +173,20 @@ func NewDataSourcesPreviewClient(cfg *config.Config) (*DataSourcesPreviewClient, return nil, err } - return &DataSourcesPreviewClient{ - Config: cfg, - apiClient: apiClient, - DataSourcesPreviewInterface: NewDataSourcesPreview(databricksClient), + return &DataSourcesClient{ + Config: cfg, + apiClient: apiClient, + DataSourcesInterface: NewDataSources(databricksClient), }, nil } -type DbsqlPermissionsPreviewClient struct { - DbsqlPermissionsPreviewInterface +type DbsqlPermissionsClient struct { + DbsqlPermissionsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewDbsqlPermissionsPreviewClient(cfg *config.Config) (*DbsqlPermissionsPreviewClient, error) { +func NewDbsqlPermissionsClient(cfg *config.Config) (*DbsqlPermissionsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -207,20 +207,20 @@ func NewDbsqlPermissionsPreviewClient(cfg *config.Config) (*DbsqlPermissionsPrev return nil, err } - return &DbsqlPermissionsPreviewClient{ - Config: cfg, - apiClient: apiClient, - DbsqlPermissionsPreviewInterface: NewDbsqlPermissionsPreview(databricksClient), + return &DbsqlPermissionsClient{ + Config: cfg, + apiClient: apiClient, + DbsqlPermissionsInterface: NewDbsqlPermissions(databricksClient), }, nil } -type QueriesLegacyPreviewClient struct { - QueriesLegacyPreviewInterface +type QueriesClient struct { + QueriesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQueriesLegacyPreviewClient(cfg *config.Config) (*QueriesLegacyPreviewClient, error) { +func NewQueriesClient(cfg *config.Config) (*QueriesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -241,20 +241,20 @@ func NewQueriesLegacyPreviewClient(cfg *config.Config) (*QueriesLegacyPreviewCli return nil, err } - return &QueriesLegacyPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueriesLegacyPreviewInterface: NewQueriesLegacyPreview(databricksClient), + return &QueriesClient{ + Config: cfg, + apiClient: apiClient, + QueriesInterface: NewQueries(databricksClient), }, nil } -type QueriesPreviewClient struct { - QueriesPreviewInterface +type QueriesLegacyClient struct { + QueriesLegacyInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQueriesPreviewClient(cfg *config.Config) (*QueriesPreviewClient, error) { +func NewQueriesLegacyClient(cfg *config.Config) (*QueriesLegacyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -275,20 +275,20 @@ func NewQueriesPreviewClient(cfg *config.Config) (*QueriesPreviewClient, error) return nil, err } - return &QueriesPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueriesPreviewInterface: NewQueriesPreview(databricksClient), + return &QueriesLegacyClient{ + Config: cfg, + apiClient: apiClient, + QueriesLegacyInterface: NewQueriesLegacy(databricksClient), }, nil } -type QueryHistoryPreviewClient struct { - QueryHistoryPreviewInterface +type QueryHistoryClient struct { + QueryHistoryInterface Config 
*config.Config apiClient *httpclient.ApiClient } -func NewQueryHistoryPreviewClient(cfg *config.Config) (*QueryHistoryPreviewClient, error) { +func NewQueryHistoryClient(cfg *config.Config) (*QueryHistoryClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -309,20 +309,20 @@ func NewQueryHistoryPreviewClient(cfg *config.Config) (*QueryHistoryPreviewClien return nil, err } - return &QueryHistoryPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueryHistoryPreviewInterface: NewQueryHistoryPreview(databricksClient), + return &QueryHistoryClient{ + Config: cfg, + apiClient: apiClient, + QueryHistoryInterface: NewQueryHistory(databricksClient), }, nil } -type QueryVisualizationsLegacyPreviewClient struct { - QueryVisualizationsLegacyPreviewInterface +type QueryVisualizationsClient struct { + QueryVisualizationsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQueryVisualizationsLegacyPreviewClient(cfg *config.Config) (*QueryVisualizationsLegacyPreviewClient, error) { +func NewQueryVisualizationsClient(cfg *config.Config) (*QueryVisualizationsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -343,20 +343,20 @@ func NewQueryVisualizationsLegacyPreviewClient(cfg *config.Config) (*QueryVisual return nil, err } - return &QueryVisualizationsLegacyPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueryVisualizationsLegacyPreviewInterface: NewQueryVisualizationsLegacyPreview(databricksClient), + return &QueryVisualizationsClient{ + Config: cfg, + apiClient: apiClient, + QueryVisualizationsInterface: NewQueryVisualizations(databricksClient), }, nil } -type QueryVisualizationsPreviewClient struct { - QueryVisualizationsPreviewInterface +type QueryVisualizationsLegacyClient struct { + QueryVisualizationsLegacyInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewQueryVisualizationsPreviewClient(cfg *config.Config) (*QueryVisualizationsPreviewClient, error) { +func NewQueryVisualizationsLegacyClient(cfg *config.Config) (*QueryVisualizationsLegacyClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -377,20 +377,20 @@ func NewQueryVisualizationsPreviewClient(cfg *config.Config) (*QueryVisualizatio return nil, err } - return &QueryVisualizationsPreviewClient{ - Config: cfg, - apiClient: apiClient, - QueryVisualizationsPreviewInterface: NewQueryVisualizationsPreview(databricksClient), + return &QueryVisualizationsLegacyClient{ + Config: cfg, + apiClient: apiClient, + QueryVisualizationsLegacyInterface: NewQueryVisualizationsLegacy(databricksClient), }, nil } -type RedashConfigPreviewClient struct { - RedashConfigPreviewInterface +type RedashConfigClient struct { + RedashConfigInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewRedashConfigPreviewClient(cfg *config.Config) (*RedashConfigPreviewClient, error) { +func NewRedashConfigClient(cfg *config.Config) (*RedashConfigClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -411,20 +411,20 @@ func NewRedashConfigPreviewClient(cfg *config.Config) (*RedashConfigPreviewClien return nil, err } - return &RedashConfigPreviewClient{ - Config: cfg, - apiClient: apiClient, - RedashConfigPreviewInterface: NewRedashConfigPreview(databricksClient), + return &RedashConfigClient{ + Config: cfg, + apiClient: apiClient, + RedashConfigInterface: NewRedashConfig(databricksClient), }, nil } -type StatementExecutionPreviewClient struct { - StatementExecutionPreviewInterface +type StatementExecutionClient struct { + StatementExecutionInterface Config *config.Config 
apiClient *httpclient.ApiClient } -func NewStatementExecutionPreviewClient(cfg *config.Config) (*StatementExecutionPreviewClient, error) { +func NewStatementExecutionClient(cfg *config.Config) (*StatementExecutionClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -445,20 +445,20 @@ func NewStatementExecutionPreviewClient(cfg *config.Config) (*StatementExecution return nil, err } - return &StatementExecutionPreviewClient{ - Config: cfg, - apiClient: apiClient, - StatementExecutionPreviewInterface: NewStatementExecutionPreview(databricksClient), + return &StatementExecutionClient{ + Config: cfg, + apiClient: apiClient, + StatementExecutionInterface: NewStatementExecution(databricksClient), }, nil } -type WarehousesPreviewClient struct { - WarehousesPreviewInterface +type WarehousesClient struct { + WarehousesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewWarehousesPreviewClient(cfg *config.Config) (*WarehousesPreviewClient, error) { +func NewWarehousesClient(cfg *config.Config) (*WarehousesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -479,9 +479,9 @@ func NewWarehousesPreviewClient(cfg *config.Config) (*WarehousesPreviewClient, e return nil, err } - return &WarehousesPreviewClient{ - Config: cfg, - apiClient: apiClient, - WarehousesPreviewInterface: NewWarehousesPreview(databricksClient), + return &WarehousesClient{ + Config: cfg, + apiClient: apiClient, + WarehousesInterface: NewWarehouses(databricksClient), }, nil } diff --git a/sql/v2preview/impl.go b/sql/v2preview/impl.go index 5b7198c3e..c3d29f232 100755 --- a/sql/v2preview/impl.go +++ b/sql/v2preview/impl.go @@ -12,69 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just AlertsLegacyPreview API methods -type alertsLegacyPreviewImpl struct { +// unexported type that holds implementations of just Alerts API methods +type alertsImpl struct { client *client.DatabricksClient } -func (a *alertsLegacyPreviewImpl) Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) { - var legacyAlert LegacyAlert - path := "/api/2.0preview/preview/sql/alerts" - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyAlert) - return &legacyAlert, err -} - -func (a *alertsLegacyPreviewImpl) Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error { - var deleteResponse DeleteResponse - path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) - return err -} - -func (a *alertsLegacyPreviewImpl) Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) { - var legacyAlert LegacyAlert - path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyAlert) - return &legacyAlert, err -} - -func (a *alertsLegacyPreviewImpl) List(ctx context.Context) ([]LegacyAlert, error) { - var legacyAlertList []LegacyAlert - path := 
"/api/2.0preview/preview/sql/alerts" - - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &legacyAlertList) - return legacyAlertList, err -} - -func (a *alertsLegacyPreviewImpl) Update(ctx context.Context, request EditAlert) error { - var updateResponse UpdateResponse - path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) - return err -} - -// unexported type that holds implementations of just AlertsPreview API methods -type alertsPreviewImpl struct { - client *client.DatabricksClient -} - -func (a *alertsPreviewImpl) Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) { +func (a *alertsImpl) Create(ctx context.Context, request CreateAlertRequest) (*Alert, error) { var alert Alert path := "/api/2.0preview/sql/alerts" queryParams := make(map[string]any) @@ -85,7 +28,7 @@ func (a *alertsPreviewImpl) Create(ctx context.Context, request CreateAlertReque return &alert, err } -func (a *alertsPreviewImpl) Delete(ctx context.Context, request TrashAlertRequest) error { +func (a *alertsImpl) Delete(ctx context.Context, request TrashAlertRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) queryParams := make(map[string]any) @@ -95,7 +38,7 @@ func (a *alertsPreviewImpl) Delete(ctx context.Context, request TrashAlertReques return err } -func (a *alertsPreviewImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { +func (a *alertsImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { var alert Alert path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) queryParams := make(map[string]any) @@ -110,7 +53,7 @@ func (a *alertsPreviewImpl) Get(ctx context.Context, request GetAlertRequest) (* // Gets a list of alerts accessible to the user, ordered by creation time. // **Warning:** Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. -func (a *alertsPreviewImpl) List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] { +func (a *alertsImpl) List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] { getNextPage := func(ctx context.Context, req ListAlertsRequest) (*ListAlertsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -139,11 +82,11 @@ func (a *alertsPreviewImpl) List(ctx context.Context, request ListAlertsRequest) // Gets a list of alerts accessible to the user, ordered by creation time. // **Warning:** Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. 
-func (a *alertsPreviewImpl) ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) { +func (a *alertsImpl) ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) { iterator := a.List(ctx, request) return listing.ToSlice[ListAlertsResponseAlert](ctx, iterator) } -func (a *alertsPreviewImpl) internalList(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { +func (a *alertsImpl) internalList(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { var listAlertsResponse ListAlertsResponse path := "/api/2.0preview/sql/alerts" queryParams := make(map[string]any) @@ -153,7 +96,7 @@ func (a *alertsPreviewImpl) internalList(ctx context.Context, request ListAlerts return &listAlertsResponse, err } -func (a *alertsPreviewImpl) Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) { +func (a *alertsImpl) Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) { var alert Alert path := fmt.Sprintf("/api/2.0preview/sql/alerts/%v", request.Id) queryParams := make(map[string]any) @@ -164,12 +107,69 @@ func (a *alertsPreviewImpl) Update(ctx context.Context, request UpdateAlertReque return &alert, err } -// unexported type that holds implementations of just DashboardWidgetsPreview API methods -type dashboardWidgetsPreviewImpl struct { +// unexported type that holds implementations of just AlertsLegacy API methods +type alertsLegacyImpl struct { + client *client.DatabricksClient +} + +func (a *alertsLegacyImpl) Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) { + var legacyAlert LegacyAlert + path := "/api/2.0preview/preview/sql/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyAlert) + return &legacyAlert, err +} + +func (a *alertsLegacyImpl) Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *alertsLegacyImpl) Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) { + var legacyAlert LegacyAlert + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyAlert) + return &legacyAlert, err +} + +func (a *alertsLegacyImpl) List(ctx context.Context) ([]LegacyAlert, error) { + var legacyAlertList []LegacyAlert + path := "/api/2.0preview/preview/sql/alerts" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &legacyAlertList) + return legacyAlertList, err +} + +func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error { + var updateResponse UpdateResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) + headers := make(map[string]string) 
+ headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) + return err +} + +// unexported type that holds implementations of just DashboardWidgets API methods +type dashboardWidgetsImpl struct { client *client.DatabricksClient } -func (a *dashboardWidgetsPreviewImpl) Create(ctx context.Context, request CreateWidget) (*Widget, error) { +func (a *dashboardWidgetsImpl) Create(ctx context.Context, request CreateWidget) (*Widget, error) { var widget Widget path := "/api/2.0preview/preview/sql/widgets" queryParams := make(map[string]any) @@ -180,7 +180,7 @@ func (a *dashboardWidgetsPreviewImpl) Create(ctx context.Context, request Create return &widget, err } -func (a *dashboardWidgetsPreviewImpl) Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error { +func (a *dashboardWidgetsImpl) Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/preview/sql/widgets/%v", request.Id) queryParams := make(map[string]any) @@ -190,7 +190,7 @@ func (a *dashboardWidgetsPreviewImpl) Delete(ctx context.Context, request Delete return err } -func (a *dashboardWidgetsPreviewImpl) Update(ctx context.Context, request CreateWidget) (*Widget, error) { +func (a *dashboardWidgetsImpl) Update(ctx context.Context, request CreateWidget) (*Widget, error) { var widget Widget path := fmt.Sprintf("/api/2.0preview/preview/sql/widgets/%v", request.Id) queryParams := make(map[string]any) @@ -201,12 +201,12 @@ func (a *dashboardWidgetsPreviewImpl) Update(ctx context.Context, request Create return &widget, err } -// unexported type that holds implementations of just DashboardsPreview API methods -type dashboardsPreviewImpl struct { +// unexported type that holds implementations of just Dashboards API methods +type dashboardsImpl struct { client *client.DatabricksClient } -func (a *dashboardsPreviewImpl) Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) { +func (a *dashboardsImpl) Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0preview/preview/sql/dashboards" queryParams := make(map[string]any) @@ -217,7 +217,7 @@ func (a *dashboardsPreviewImpl) Create(ctx context.Context, request DashboardPos return &dashboard, err } -func (a *dashboardsPreviewImpl) Delete(ctx context.Context, request DeleteDashboardRequest) error { +func (a *dashboardsImpl) Delete(ctx context.Context, request DeleteDashboardRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -227,7 +227,7 @@ func (a *dashboardsPreviewImpl) Delete(ctx context.Context, request DeleteDashbo return err } -func (a *dashboardsPreviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { +func (a *dashboardsImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -243,7 +243,7 @@ func (a *dashboardsPreviewImpl) Get(ctx context.Context, request GetDashboardReq // // **Warning**: Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. 
-func (a *dashboardsPreviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { +func (a *dashboardsImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { request.Page = 1 // start iterating from the first page @@ -280,12 +280,12 @@ func (a *dashboardsPreviewImpl) List(ctx context.Context, request ListDashboards // // **Warning**: Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. -func (a *dashboardsPreviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { +func (a *dashboardsImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { iterator := a.List(ctx, request) return listing.ToSliceN[Dashboard, int](ctx, iterator, request.PageSize) } -func (a *dashboardsPreviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { +func (a *dashboardsImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0preview/preview/sql/dashboards" queryParams := make(map[string]any) @@ -295,7 +295,7 @@ func (a *dashboardsPreviewImpl) internalList(ctx context.Context, request ListDa return &listResponse, err } -func (a *dashboardsPreviewImpl) Restore(ctx context.Context, request RestoreDashboardRequest) error { +func (a *dashboardsImpl) Restore(ctx context.Context, request RestoreDashboardRequest) error { var restoreResponse RestoreResponse path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/trash/%v", request.DashboardId) queryParams := make(map[string]any) @@ -305,7 +305,7 @@ func (a *dashboardsPreviewImpl) Restore(ctx context.Context, request RestoreDash return err } -func (a *dashboardsPreviewImpl) Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error) { +func (a *dashboardsImpl) Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0preview/preview/sql/dashboards/%v", request.DashboardId) queryParams := make(map[string]any) @@ -316,12 +316,12 @@ func (a *dashboardsPreviewImpl) Update(ctx context.Context, request DashboardEdi return &dashboard, err } -// unexported type that holds implementations of just DataSourcesPreview API methods -type dataSourcesPreviewImpl struct { +// unexported type that holds implementations of just DataSources API methods +type dataSourcesImpl struct { client *client.DatabricksClient } -func (a *dataSourcesPreviewImpl) List(ctx context.Context) ([]DataSource, error) { +func (a *dataSourcesImpl) List(ctx context.Context) ([]DataSource, error) { var dataSourceList []DataSource path := "/api/2.0preview/preview/sql/data_sources" @@ -331,12 +331,12 @@ func (a *dataSourcesPreviewImpl) List(ctx context.Context) ([]DataSource, error) return dataSourceList, err } -// unexported type that holds implementations of just DbsqlPermissionsPreview API methods -type dbsqlPermissionsPreviewImpl struct { +// unexported type that holds implementations of just DbsqlPermissions API methods +type dbsqlPermissionsImpl struct { client *client.DatabricksClient } -func (a *dbsqlPermissionsPreviewImpl) Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) { +func (a *dbsqlPermissionsImpl) Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) { var getResponse GetResponse path := 
fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) queryParams := make(map[string]any) @@ -346,7 +346,7 @@ func (a *dbsqlPermissionsPreviewImpl) Get(ctx context.Context, request GetDbsqlP return &getResponse, err } -func (a *dbsqlPermissionsPreviewImpl) Set(ctx context.Context, request SetRequest) (*SetResponse, error) { +func (a *dbsqlPermissionsImpl) Set(ctx context.Context, request SetRequest) (*SetResponse, error) { var setResponse SetResponse path := fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) queryParams := make(map[string]any) @@ -357,7 +357,7 @@ func (a *dbsqlPermissionsPreviewImpl) Set(ctx context.Context, request SetReques return &setResponse, err } -func (a *dbsqlPermissionsPreviewImpl) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) { +func (a *dbsqlPermissionsImpl) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) { var success Success path := fmt.Sprintf("/api/2.0preview/preview/sql/permissions/%v/%v/transfer", request.ObjectType, request.ObjectId) queryParams := make(map[string]any) @@ -368,139 +368,12 @@ func (a *dbsqlPermissionsPreviewImpl) TransferOwnership(ctx context.Context, req return &success, err } -// unexported type that holds implementations of just QueriesLegacyPreview API methods -type queriesLegacyPreviewImpl struct { - client *client.DatabricksClient -} - -func (a *queriesLegacyPreviewImpl) Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error) { - var legacyQuery LegacyQuery - path := "/api/2.0preview/preview/sql/queries" - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) - return &legacyQuery, err -} - -func (a *queriesLegacyPreviewImpl) Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error { - var deleteResponse DeleteResponse - path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) - return err -} - -func (a *queriesLegacyPreviewImpl) Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error) { - var legacyQuery LegacyQuery - path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyQuery) - return &legacyQuery, err -} - -// Get a list of queries. -// -// Gets a list of queries. Optionally, this list can be filtered by a search -// term. -// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:queries/list instead. 
[Learn more] -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *queriesLegacyPreviewImpl) List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] { - - request.Page = 1 // start iterating from the first page - - getNextPage := func(ctx context.Context, req ListQueriesLegacyRequest) (*QueryList, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.internalList(ctx, req) - } - getItems := func(resp *QueryList) []LegacyQuery { - return resp.Results - } - getNextReq := func(resp *QueryList) *ListQueriesLegacyRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.Page = resp.Page + 1 - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[LegacyQuery, string]( - iterator, - func(item LegacyQuery) string { - return item.Id - }) - return dedupedIterator -} - -// Get a list of queries. -// -// Gets a list of queries. Optionally, this list can be filtered by a search -// term. -// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:queries/list instead. [Learn more] -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *queriesLegacyPreviewImpl) ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[LegacyQuery, int](ctx, iterator, request.PageSize) - -} -func (a *queriesLegacyPreviewImpl) internalList(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { - var queryList QueryList - path := "/api/2.0preview/preview/sql/queries" - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &queryList) - return &queryList, err -} - -func (a *queriesLegacyPreviewImpl) Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error { - var restoreResponse RestoreResponse - path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/trash/%v", request.QueryId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) - return err -} - -func (a *queriesLegacyPreviewImpl) Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) { - var legacyQuery LegacyQuery - path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) - queryParams := make(map[string]any) - headers := make(map[string]string) - headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) - return &legacyQuery, err -} - -// unexported type that holds implementations of just QueriesPreview API methods -type queriesPreviewImpl struct { +// unexported type that holds implementations of just Queries API methods +type queriesImpl struct { client *client.DatabricksClient } -func (a *queriesPreviewImpl) Create(ctx context.Context, request CreateQueryRequest) (*Query, error) { +func (a *queriesImpl) Create(ctx context.Context, request CreateQueryRequest) (*Query, 
error) { var query Query path := "/api/2.0preview/sql/queries" queryParams := make(map[string]any) @@ -511,7 +384,7 @@ func (a *queriesPreviewImpl) Create(ctx context.Context, request CreateQueryRequ return &query, err } -func (a *queriesPreviewImpl) Delete(ctx context.Context, request TrashQueryRequest) error { +func (a *queriesImpl) Delete(ctx context.Context, request TrashQueryRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) queryParams := make(map[string]any) @@ -521,7 +394,7 @@ func (a *queriesPreviewImpl) Delete(ctx context.Context, request TrashQueryReque return err } -func (a *queriesPreviewImpl) Get(ctx context.Context, request GetQueryRequest) (*Query, error) { +func (a *queriesImpl) Get(ctx context.Context, request GetQueryRequest) (*Query, error) { var query Query path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) queryParams := make(map[string]any) @@ -536,7 +409,7 @@ func (a *queriesPreviewImpl) Get(ctx context.Context, request GetQueryRequest) ( // Gets a list of queries accessible to the user, ordered by creation time. // **Warning:** Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. -func (a *queriesPreviewImpl) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] { +func (a *queriesImpl) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] { getNextPage := func(ctx context.Context, req ListQueriesRequest) (*ListQueryObjectsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -565,11 +438,11 @@ func (a *queriesPreviewImpl) List(ctx context.Context, request ListQueriesReques // Gets a list of queries accessible to the user, ordered by creation time. // **Warning:** Calling this API concurrently 10 or more times could result in // throttling, service degradation, or a temporary ban. -func (a *queriesPreviewImpl) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) { +func (a *queriesImpl) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) { iterator := a.List(ctx, request) return listing.ToSlice[ListQueryObjectsResponseQuery](ctx, iterator) } -func (a *queriesPreviewImpl) internalList(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { +func (a *queriesImpl) internalList(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { var listQueryObjectsResponse ListQueryObjectsResponse path := "/api/2.0preview/sql/queries" queryParams := make(map[string]any) @@ -582,7 +455,7 @@ func (a *queriesPreviewImpl) internalList(ctx context.Context, request ListQueri // List visualizations on a query. // // Gets a list of visualizations on a query. -func (a *queriesPreviewImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] { +func (a *queriesImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] { getNextPage := func(ctx context.Context, req ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -609,11 +482,11 @@ func (a *queriesPreviewImpl) ListVisualizations(ctx context.Context, request Lis // List visualizations on a query. 
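//
// Draining the iterator in one call (illustrative sketch, not generator
// output; the query ID "q-123" is hypothetical):
//
//	vis, err := a.ListVisualizationsAll(ctx, ListVisualizationsForQueryRequest{Id: "q-123"})
//	if err != nil {
//		return err
//	}
//	fmt.Println(len(vis))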
// // Gets a list of visualizations on a query. -func (a *queriesPreviewImpl) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) { +func (a *queriesImpl) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) { iterator := a.ListVisualizations(ctx, request) return listing.ToSlice[Visualization](ctx, iterator) } -func (a *queriesPreviewImpl) internalListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { +func (a *queriesImpl) internalListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { var listVisualizationsForQueryResponse ListVisualizationsForQueryResponse path := fmt.Sprintf("/api/2.0preview/sql/queries/%v/visualizations", request.Id) queryParams := make(map[string]any) @@ -623,7 +496,7 @@ func (a *queriesPreviewImpl) internalListVisualizations(ctx context.Context, req return &listVisualizationsForQueryResponse, err } -func (a *queriesPreviewImpl) Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) { +func (a *queriesImpl) Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) { var query Query path := fmt.Sprintf("/api/2.0preview/sql/queries/%v", request.Id) queryParams := make(map[string]any) @@ -634,64 +507,154 @@ func (a *queriesPreviewImpl) Update(ctx context.Context, request UpdateQueryRequ return &query, err } -// unexported type that holds implementations of just QueryHistoryPreview API methods -type queryHistoryPreviewImpl struct { +// unexported type that holds implementations of just QueriesLegacy API methods +type queriesLegacyImpl struct { client *client.DatabricksClient } -func (a *queryHistoryPreviewImpl) List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) { - var listQueriesResponse ListQueriesResponse - path := "/api/2.0preview/sql/history/queries" +func (a *queriesLegacyImpl) Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := "/api/2.0preview/preview/sql/queries" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueriesResponse) - return &listQueriesResponse, err + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err } -// unexported type that holds implementations of just QueryVisualizationsLegacyPreview API methods -type queryVisualizationsLegacyPreviewImpl struct { - client *client.DatabricksClient +func (a *queriesLegacyImpl) Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err } -func (a *queryVisualizationsLegacyPreviewImpl) Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error) { - var legacyVisualization LegacyVisualization - path := "/api/2.0preview/preview/sql/visualizations" +func (a 
*queriesLegacyImpl) Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) - return &legacyVisualization, err + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err } -func (a *queryVisualizationsLegacyPreviewImpl) Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error { - var deleteResponse DeleteResponse - path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. [Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyImpl) List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] { + + request.Page = 1 // start iterating from the first page + + getNextPage := func(ctx context.Context, req ListQueriesLegacyRequest) (*QueryList, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *QueryList) []LegacyQuery { + return resp.Results + } + getNextReq := func(resp *QueryList) *ListQueriesLegacyRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.Page = resp.Page + 1 + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[LegacyQuery, string]( + iterator, + func(item LegacyQuery) string { + return item.Id + }) + return dedupedIterator +} + +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. 
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyImpl) ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[LegacyQuery, int](ctx, iterator, request.PageSize) + +} +func (a *queriesLegacyImpl) internalList(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { + var queryList QueryList + path := "/api/2.0preview/preview/sql/queries" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &queryList) + return &queryList, err +} + +func (a *queriesLegacyImpl) Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error { + var restoreResponse RestoreResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/trash/%v", request.QueryId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) return err } -func (a *queryVisualizationsLegacyPreviewImpl) Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error) { - var legacyVisualization LegacyVisualization - path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) +func (a *queriesLegacyImpl) Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) { + var legacyQuery LegacyQuery + path := fmt.Sprintf("/api/2.0preview/preview/sql/queries/%v", request.QueryId) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) - return &legacyVisualization, err + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) + return &legacyQuery, err } -// unexported type that holds implementations of just QueryVisualizationsPreview API methods -type queryVisualizationsPreviewImpl struct { +// unexported type that holds implementations of just QueryHistory API methods +type queryHistoryImpl struct { client *client.DatabricksClient } -func (a *queryVisualizationsPreviewImpl) Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) { +func (a *queryHistoryImpl) List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) { + var listQueriesResponse ListQueriesResponse + path := "/api/2.0preview/sql/history/queries" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueriesResponse) + return &listQueriesResponse, err +} + +// unexported type that holds implementations of just QueryVisualizations API methods +type queryVisualizationsImpl struct { + client *client.DatabricksClient +} + +func (a *queryVisualizationsImpl) Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) { var visualization Visualization path := "/api/2.0preview/sql/visualizations" queryParams := make(map[string]any) @@ -702,7 +665,7 @@ func (a *queryVisualizationsPreviewImpl) 
Create(ctx context.Context, request Cre return &visualization, err } -func (a *queryVisualizationsPreviewImpl) Delete(ctx context.Context, request DeleteVisualizationRequest) error { +func (a *queryVisualizationsImpl) Delete(ctx context.Context, request DeleteVisualizationRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0preview/sql/visualizations/%v", request.Id) queryParams := make(map[string]any) @@ -712,7 +675,7 @@ func (a *queryVisualizationsPreviewImpl) Delete(ctx context.Context, request Del return err } -func (a *queryVisualizationsPreviewImpl) Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) { +func (a *queryVisualizationsImpl) Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) { var visualization Visualization path := fmt.Sprintf("/api/2.0preview/sql/visualizations/%v", request.Id) queryParams := make(map[string]any) @@ -723,12 +686,49 @@ func (a *queryVisualizationsPreviewImpl) Update(ctx context.Context, request Upd return &visualization, err } -// unexported type that holds implementations of just RedashConfigPreview API methods -type redashConfigPreviewImpl struct { +// unexported type that holds implementations of just QueryVisualizationsLegacy API methods +type queryVisualizationsLegacyImpl struct { + client *client.DatabricksClient +} + +func (a *queryVisualizationsLegacyImpl) Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error) { + var legacyVisualization LegacyVisualization + path := "/api/2.0preview/preview/sql/visualizations" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) + return &legacyVisualization, err +} + +func (a *queryVisualizationsLegacyImpl) Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *queryVisualizationsLegacyImpl) Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error) { + var legacyVisualization LegacyVisualization + path := fmt.Sprintf("/api/2.0preview/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) + return &legacyVisualization, err +} + +// unexported type that holds implementations of just RedashConfig API methods +type redashConfigImpl struct { client *client.DatabricksClient } -func (a *redashConfigPreviewImpl) GetConfig(ctx context.Context) (*ClientConfig, error) { +func (a *redashConfigImpl) GetConfig(ctx context.Context) (*ClientConfig, error) { var clientConfig ClientConfig path := "/api/2.0preview/redash-v2/config" @@ -738,12 +738,12 @@ func (a *redashConfigPreviewImpl) GetConfig(ctx context.Context) (*ClientConfig, return &clientConfig, err } -// unexported type that holds implementations of just 
StatementExecutionPreview API methods -type statementExecutionPreviewImpl struct { +// unexported type that holds implementations of just StatementExecution API methods +type statementExecutionImpl struct { client *client.DatabricksClient } -func (a *statementExecutionPreviewImpl) CancelExecution(ctx context.Context, request CancelExecutionRequest) error { +func (a *statementExecutionImpl) CancelExecution(ctx context.Context, request CancelExecutionRequest) error { var cancelExecutionResponse CancelExecutionResponse path := fmt.Sprintf("/api/2.0preview/sql/statements/%v/cancel", request.StatementId) queryParams := make(map[string]any) @@ -752,7 +752,7 @@ func (a *statementExecutionPreviewImpl) CancelExecution(ctx context.Context, req return err } -func (a *statementExecutionPreviewImpl) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) { +func (a *statementExecutionImpl) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) { var statementResponse StatementResponse path := "/api/2.0preview/sql/statements/" queryParams := make(map[string]any) @@ -763,7 +763,7 @@ func (a *statementExecutionPreviewImpl) ExecuteStatement(ctx context.Context, re return &statementResponse, err } -func (a *statementExecutionPreviewImpl) GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error) { +func (a *statementExecutionImpl) GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error) { var statementResponse StatementResponse path := fmt.Sprintf("/api/2.0preview/sql/statements/%v", request.StatementId) queryParams := make(map[string]any) @@ -773,7 +773,7 @@ func (a *statementExecutionPreviewImpl) GetStatement(ctx context.Context, reques return &statementResponse, err } -func (a *statementExecutionPreviewImpl) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error) { +func (a *statementExecutionImpl) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error) { var resultData ResultData path := fmt.Sprintf("/api/2.0preview/sql/statements/%v/result/chunks/%v", request.StatementId, request.ChunkIndex) queryParams := make(map[string]any) @@ -783,12 +783,12 @@ func (a *statementExecutionPreviewImpl) GetStatementResultChunkN(ctx context.Con return &resultData, err } -// unexported type that holds implementations of just WarehousesPreview API methods -type warehousesPreviewImpl struct { +// unexported type that holds implementations of just Warehouses API methods +type warehousesImpl struct { client *client.DatabricksClient } -func (a *warehousesPreviewImpl) Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error) { +func (a *warehousesImpl) Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error) { var createWarehouseResponse CreateWarehouseResponse path := "/api/2.0preview/sql/warehouses" queryParams := make(map[string]any) @@ -799,7 +799,7 @@ func (a *warehousesPreviewImpl) Create(ctx context.Context, request CreateWareho return &createWarehouseResponse, err } -func (a *warehousesPreviewImpl) Delete(ctx context.Context, request DeleteWarehouseRequest) error { +func (a *warehousesImpl) Delete(ctx context.Context, request DeleteWarehouseRequest) error { var deleteWarehouseResponse DeleteWarehouseResponse path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v", request.Id) queryParams := 
make(map[string]any) @@ -809,7 +809,7 @@ func (a *warehousesPreviewImpl) Delete(ctx context.Context, request DeleteWareho return err } -func (a *warehousesPreviewImpl) Edit(ctx context.Context, request EditWarehouseRequest) error { +func (a *warehousesImpl) Edit(ctx context.Context, request EditWarehouseRequest) error { var editWarehouseResponse EditWarehouseResponse path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/edit", request.Id) queryParams := make(map[string]any) @@ -820,7 +820,7 @@ func (a *warehousesPreviewImpl) Edit(ctx context.Context, request EditWarehouseR return err } -func (a *warehousesPreviewImpl) Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error) { +func (a *warehousesImpl) Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error) { var getWarehouseResponse GetWarehouseResponse path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v", request.Id) queryParams := make(map[string]any) @@ -830,7 +830,7 @@ func (a *warehousesPreviewImpl) Get(ctx context.Context, request GetWarehouseReq return &getWarehouseResponse, err } -func (a *warehousesPreviewImpl) GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error) { +func (a *warehousesImpl) GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error) { var getWarehousePermissionLevelsResponse GetWarehousePermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v/permissionLevels", request.WarehouseId) queryParams := make(map[string]any) @@ -840,7 +840,7 @@ func (a *warehousesPreviewImpl) GetPermissionLevels(ctx context.Context, request return &getWarehousePermissionLevelsResponse, err } -func (a *warehousesPreviewImpl) GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error) { +func (a *warehousesImpl) GetPermissions(ctx context.Context, request GetWarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) queryParams := make(map[string]any) @@ -850,7 +850,7 @@ func (a *warehousesPreviewImpl) GetPermissions(ctx context.Context, request GetW return &warehousePermissions, err } -func (a *warehousesPreviewImpl) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error) { +func (a *warehousesImpl) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error) { var getWorkspaceWarehouseConfigResponse GetWorkspaceWarehouseConfigResponse path := "/api/2.0preview/sql/config/warehouses" @@ -863,7 +863,7 @@ func (a *warehousesPreviewImpl) GetWorkspaceWarehouseConfig(ctx context.Context) // List warehouses. // // Lists all SQL warehouses that a user has manager permissions on. -func (a *warehousesPreviewImpl) List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] { +func (a *warehousesImpl) List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] { getNextPage := func(ctx context.Context, req ListWarehousesRequest) (*ListWarehousesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -884,11 +884,11 @@ func (a *warehousesPreviewImpl) List(ctx context.Context, request ListWarehouses // List warehouses. 
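//
// Collecting every warehouse into a slice (illustrative sketch, not
// generator output; assumes a function returning error):
//
//	warehouses, err := a.ListAll(ctx, ListWarehousesRequest{})
//	if err != nil {
//		return err
//	}
//	for _, w := range warehouses {
//		fmt.Println(w.Name)
//	}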
// // Lists all SQL warehouses that a user has manager permissions on. -func (a *warehousesPreviewImpl) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) { +func (a *warehousesImpl) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[EndpointInfo](ctx, iterator) } -func (a *warehousesPreviewImpl) internalList(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { +func (a *warehousesImpl) internalList(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { var listWarehousesResponse ListWarehousesResponse path := "/api/2.0preview/sql/warehouses" queryParams := make(map[string]any) @@ -898,7 +898,7 @@ func (a *warehousesPreviewImpl) internalList(ctx context.Context, request ListWa return &listWarehousesResponse, err } -func (a *warehousesPreviewImpl) SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { +func (a *warehousesImpl) SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) queryParams := make(map[string]any) @@ -909,7 +909,7 @@ func (a *warehousesPreviewImpl) SetPermissions(ctx context.Context, request Ware return &warehousePermissions, err } -func (a *warehousesPreviewImpl) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error { +func (a *warehousesImpl) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error { var setWorkspaceWarehouseConfigResponse SetWorkspaceWarehouseConfigResponse path := "/api/2.0preview/sql/config/warehouses" queryParams := make(map[string]any) @@ -920,7 +920,7 @@ func (a *warehousesPreviewImpl) SetWorkspaceWarehouseConfig(ctx context.Context, return err } -func (a *warehousesPreviewImpl) Start(ctx context.Context, request StartRequest) error { +func (a *warehousesImpl) Start(ctx context.Context, request StartRequest) error { var startWarehouseResponse StartWarehouseResponse path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/start", request.Id) queryParams := make(map[string]any) @@ -930,7 +930,7 @@ func (a *warehousesPreviewImpl) Start(ctx context.Context, request StartRequest) return err } -func (a *warehousesPreviewImpl) Stop(ctx context.Context, request StopRequest) error { +func (a *warehousesImpl) Stop(ctx context.Context, request StopRequest) error { var stopWarehouseResponse StopWarehouseResponse path := fmt.Sprintf("/api/2.0preview/sql/warehouses/%v/stop", request.Id) queryParams := make(map[string]any) @@ -940,7 +940,7 @@ func (a *warehousesPreviewImpl) Stop(ctx context.Context, request StopRequest) e return err } -func (a *warehousesPreviewImpl) UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { +func (a *warehousesImpl) UpdatePermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0preview/permissions/warehouses/%v", request.WarehouseId) queryParams := make(map[string]any) diff --git a/vectorsearch/v2preview/api.go b/vectorsearch/v2preview/api.go index 94b194203..588bf1503 100755 --- a/vectorsearch/v2preview/api.go +++ b/vectorsearch/v2preview/api.go @@ -1,6 +1,6 
@@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Vector Search Endpoints Preview, Vector Search Indexes Preview, etc. +// These APIs allow you to manage Vector Search Endpoints, Vector Search Indexes, etc. package vectorsearchpreview import ( @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/listing" ) -type VectorSearchEndpointsPreviewInterface interface { +type VectorSearchEndpointsInterface interface { // Create an endpoint. // @@ -40,34 +40,34 @@ type VectorSearchEndpointsPreviewInterface interface { ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) } -func NewVectorSearchEndpointsPreview(client *client.DatabricksClient) *VectorSearchEndpointsPreviewAPI { - return &VectorSearchEndpointsPreviewAPI{ - vectorSearchEndpointsPreviewImpl: vectorSearchEndpointsPreviewImpl{ +func NewVectorSearchEndpoints(client *client.DatabricksClient) *VectorSearchEndpointsAPI { + return &VectorSearchEndpointsAPI{ + vectorSearchEndpointsImpl: vectorSearchEndpointsImpl{ client: client, }, } } // **Endpoint**: Represents the compute resources to host vector search indexes. -type VectorSearchEndpointsPreviewAPI struct { - vectorSearchEndpointsPreviewImpl +type VectorSearchEndpointsAPI struct { + vectorSearchEndpointsImpl } // Delete an endpoint. -func (a *VectorSearchEndpointsPreviewAPI) DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error { - return a.vectorSearchEndpointsPreviewImpl.DeleteEndpoint(ctx, DeleteEndpointRequest{ +func (a *VectorSearchEndpointsAPI) DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error { + return a.vectorSearchEndpointsImpl.DeleteEndpoint(ctx, DeleteEndpointRequest{ EndpointName: endpointName, }) } // Get an endpoint. -func (a *VectorSearchEndpointsPreviewAPI) GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) { - return a.vectorSearchEndpointsPreviewImpl.GetEndpoint(ctx, GetEndpointRequest{ +func (a *VectorSearchEndpointsAPI) GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) { + return a.vectorSearchEndpointsImpl.GetEndpoint(ctx, GetEndpointRequest{ EndpointName: endpointName, }) } -type VectorSearchIndexesPreviewInterface interface { +type VectorSearchIndexesInterface interface { // Create an index. // @@ -141,9 +141,9 @@ type VectorSearchIndexesPreviewInterface interface { UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) } -func NewVectorSearchIndexesPreview(client *client.DatabricksClient) *VectorSearchIndexesPreviewAPI { - return &VectorSearchIndexesPreviewAPI{ - vectorSearchIndexesPreviewImpl: vectorSearchIndexesPreviewImpl{ +func NewVectorSearchIndexes(client *client.DatabricksClient) *VectorSearchIndexesAPI { + return &VectorSearchIndexesAPI{ + vectorSearchIndexesImpl: vectorSearchIndexesImpl{ client: client, }, } @@ -159,15 +159,15 @@ func NewVectorSearchIndexesPreview(client *client.DatabricksClient) *VectorSearc // changes. * **Direct Vector Access Index**: An index that supports direct read // and write of vectors and metadata through our REST and SDK APIs. With this // model, the user manages index updates. -type VectorSearchIndexesPreviewAPI struct { - vectorSearchIndexesPreviewImpl +type VectorSearchIndexesAPI struct { + vectorSearchIndexesImpl } // Delete an index. // // Delete an index. 
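//
// The ...ByIndexName helpers below are thin convenience wrappers that build
// the request struct from a single argument; e.g. (the index name is a
// hypothetical example):
//
//	err := a.DeleteIndexByIndexName(ctx, "main.default.my_index")
//
// is equivalent to DeleteIndex(ctx, DeleteIndexRequest{IndexName: "main.default.my_index"}).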
-func (a *VectorSearchIndexesPreviewAPI) DeleteIndexByIndexName(ctx context.Context, indexName string) error { - return a.vectorSearchIndexesPreviewImpl.DeleteIndex(ctx, DeleteIndexRequest{ +func (a *VectorSearchIndexesAPI) DeleteIndexByIndexName(ctx context.Context, indexName string) error { + return a.vectorSearchIndexesImpl.DeleteIndex(ctx, DeleteIndexRequest{ IndexName: indexName, }) } @@ -175,8 +175,8 @@ func (a *VectorSearchIndexesPreviewAPI) DeleteIndexByIndexName(ctx context.Conte // Get an index. // // Get an index. -func (a *VectorSearchIndexesPreviewAPI) GetIndexByIndexName(ctx context.Context, indexName string) (*VectorIndex, error) { - return a.vectorSearchIndexesPreviewImpl.GetIndex(ctx, GetIndexRequest{ +func (a *VectorSearchIndexesAPI) GetIndexByIndexName(ctx context.Context, indexName string) (*VectorIndex, error) { + return a.vectorSearchIndexesImpl.GetIndex(ctx, GetIndexRequest{ IndexName: indexName, }) } diff --git a/vectorsearch/v2preview/client.go b/vectorsearch/v2preview/client.go index caaa7769d..7852408df 100755 --- a/vectorsearch/v2preview/client.go +++ b/vectorsearch/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type VectorSearchEndpointsPreviewClient struct { - VectorSearchEndpointsPreviewInterface +type VectorSearchEndpointsClient struct { + VectorSearchEndpointsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewVectorSearchEndpointsPreviewClient(cfg *config.Config) (*VectorSearchEndpointsPreviewClient, error) { +func NewVectorSearchEndpointsClient(cfg *config.Config) (*VectorSearchEndpointsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewVectorSearchEndpointsPreviewClient(cfg *config.Config) (*VectorSearchEnd return nil, err } - return &VectorSearchEndpointsPreviewClient{ - Config: cfg, - apiClient: apiClient, - VectorSearchEndpointsPreviewInterface: NewVectorSearchEndpointsPreview(databricksClient), + return &VectorSearchEndpointsClient{ + Config: cfg, + apiClient: apiClient, + VectorSearchEndpointsInterface: NewVectorSearchEndpoints(databricksClient), }, nil } -type VectorSearchIndexesPreviewClient struct { - VectorSearchIndexesPreviewInterface +type VectorSearchIndexesClient struct { + VectorSearchIndexesInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewVectorSearchIndexesPreviewClient(cfg *config.Config) (*VectorSearchIndexesPreviewClient, error) { +func NewVectorSearchIndexesClient(cfg *config.Config) (*VectorSearchIndexesClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,9 +71,9 @@ func NewVectorSearchIndexesPreviewClient(cfg *config.Config) (*VectorSearchIndex return nil, err } - return &VectorSearchIndexesPreviewClient{ - Config: cfg, - apiClient: apiClient, - VectorSearchIndexesPreviewInterface: NewVectorSearchIndexesPreview(databricksClient), + return &VectorSearchIndexesClient{ + Config: cfg, + apiClient: apiClient, + VectorSearchIndexesInterface: NewVectorSearchIndexes(databricksClient), }, nil } diff --git a/vectorsearch/v2preview/impl.go b/vectorsearch/v2preview/impl.go index 9dfd88123..e7c91de98 100755 --- a/vectorsearch/v2preview/impl.go +++ b/vectorsearch/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just VectorSearchEndpointsPreview API methods -type vectorSearchEndpointsPreviewImpl struct { +// unexported type that holds implementations of just 
VectorSearchEndpoints API methods +type vectorSearchEndpointsImpl struct { client *client.DatabricksClient } -func (a *vectorSearchEndpointsPreviewImpl) CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) { +func (a *vectorSearchEndpointsImpl) CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) { var endpointInfo EndpointInfo path := "/api/2.0preview/vector-search/endpoints" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *vectorSearchEndpointsPreviewImpl) CreateEndpoint(ctx context.Context, r return &endpointInfo, err } -func (a *vectorSearchEndpointsPreviewImpl) DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error { +func (a *vectorSearchEndpointsImpl) DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error { var deleteEndpointResponse DeleteEndpointResponse path := fmt.Sprintf("/api/2.0preview/vector-search/endpoints/%v", request.EndpointName) queryParams := make(map[string]any) @@ -37,7 +37,7 @@ func (a *vectorSearchEndpointsPreviewImpl) DeleteEndpoint(ctx context.Context, r return err } -func (a *vectorSearchEndpointsPreviewImpl) GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) { +func (a *vectorSearchEndpointsImpl) GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) { var endpointInfo EndpointInfo path := fmt.Sprintf("/api/2.0preview/vector-search/endpoints/%v", request.EndpointName) queryParams := make(map[string]any) @@ -48,7 +48,7 @@ func (a *vectorSearchEndpointsPreviewImpl) GetEndpoint(ctx context.Context, requ } // List all endpoints. -func (a *vectorSearchEndpointsPreviewImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { +func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -73,11 +73,11 @@ func (a *vectorSearchEndpointsPreviewImpl) ListEndpoints(ctx context.Context, re } // List all endpoints. 
-func (a *vectorSearchEndpointsPreviewImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { +func (a *vectorSearchEndpointsImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { iterator := a.ListEndpoints(ctx, request) return listing.ToSlice[EndpointInfo](ctx, iterator) } -func (a *vectorSearchEndpointsPreviewImpl) internalListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { +func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { var listEndpointResponse ListEndpointResponse path := "/api/2.0preview/vector-search/endpoints" queryParams := make(map[string]any) @@ -87,12 +87,12 @@ func (a *vectorSearchEndpointsPreviewImpl) internalListEndpoints(ctx context.Con return &listEndpointResponse, err } -// unexported type that holds implementations of just VectorSearchIndexesPreview API methods -type vectorSearchIndexesPreviewImpl struct { +// unexported type that holds implementations of just VectorSearchIndexes API methods +type vectorSearchIndexesImpl struct { client *client.DatabricksClient } -func (a *vectorSearchIndexesPreviewImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { var createVectorIndexResponse CreateVectorIndexResponse path := "/api/2.0preview/vector-search/indexes" queryParams := make(map[string]any) @@ -103,7 +103,7 @@ func (a *vectorSearchIndexesPreviewImpl) CreateIndex(ctx context.Context, reques return &createVectorIndexResponse, err } -func (a *vectorSearchIndexesPreviewImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { var deleteDataVectorIndexResponse DeleteDataVectorIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/delete-data", request.IndexName) queryParams := make(map[string]any) @@ -114,7 +114,7 @@ func (a *vectorSearchIndexesPreviewImpl) DeleteDataVectorIndex(ctx context.Conte return &deleteDataVectorIndexResponse, err } -func (a *vectorSearchIndexesPreviewImpl) DeleteIndex(ctx context.Context, request DeleteIndexRequest) error { +func (a *vectorSearchIndexesImpl) DeleteIndex(ctx context.Context, request DeleteIndexRequest) error { var deleteIndexResponse DeleteIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v", request.IndexName) queryParams := make(map[string]any) @@ -123,7 +123,7 @@ func (a *vectorSearchIndexesPreviewImpl) DeleteIndex(ctx context.Context, reques return err } -func (a *vectorSearchIndexesPreviewImpl) GetIndex(ctx context.Context, request GetIndexRequest) (*VectorIndex, error) { +func (a *vectorSearchIndexesImpl) GetIndex(ctx context.Context, request GetIndexRequest) (*VectorIndex, error) { var vectorIndex VectorIndex path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v", request.IndexName) queryParams := make(map[string]any) @@ -136,7 +136,7 @@ func (a *vectorSearchIndexesPreviewImpl) GetIndex(ctx context.Context, request G // List indexes. // // List all indexes in the given endpoint. 
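//
// Iterating the indexes on an endpoint (illustrative sketch, not generator
// output; the endpoint name "demo-endpoint" is hypothetical, and the loop
// is assumed to run inside a function returning error):
//
//	it := a.ListIndexes(ctx, ListIndexesRequest{EndpointName: "demo-endpoint"})
//	for it.HasNext(ctx) {
//		idx, err := it.Next(ctx)
//		if err != nil {
//			return err
//		}
//		fmt.Println(idx.Name)
//	}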
-func (a *vectorSearchIndexesPreviewImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] { +func (a *vectorSearchIndexesImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] { getNextPage := func(ctx context.Context, req ListIndexesRequest) (*ListVectorIndexesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -163,11 +163,11 @@ func (a *vectorSearchIndexesPreviewImpl) ListIndexes(ctx context.Context, reques // List indexes. // // List all indexes in the given endpoint. -func (a *vectorSearchIndexesPreviewImpl) ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) { +func (a *vectorSearchIndexesImpl) ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) { iterator := a.ListIndexes(ctx, request) return listing.ToSlice[MiniVectorIndex](ctx, iterator) } -func (a *vectorSearchIndexesPreviewImpl) internalListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { +func (a *vectorSearchIndexesImpl) internalListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { var listVectorIndexesResponse ListVectorIndexesResponse path := "/api/2.0preview/vector-search/indexes" queryParams := make(map[string]any) @@ -177,7 +177,7 @@ func (a *vectorSearchIndexesPreviewImpl) internalListIndexes(ctx context.Context return &listVectorIndexesResponse, err } -func (a *vectorSearchIndexesPreviewImpl) QueryIndex(ctx context.Context, request QueryVectorIndexRequest) (*QueryVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) QueryIndex(ctx context.Context, request QueryVectorIndexRequest) (*QueryVectorIndexResponse, error) { var queryVectorIndexResponse QueryVectorIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/query", request.IndexName) queryParams := make(map[string]any) @@ -188,7 +188,7 @@ func (a *vectorSearchIndexesPreviewImpl) QueryIndex(ctx context.Context, request return &queryVectorIndexResponse, err } -func (a *vectorSearchIndexesPreviewImpl) QueryNextPage(ctx context.Context, request QueryVectorIndexNextPageRequest) (*QueryVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) QueryNextPage(ctx context.Context, request QueryVectorIndexNextPageRequest) (*QueryVectorIndexResponse, error) { var queryVectorIndexResponse QueryVectorIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/query-next-page", request.IndexName) queryParams := make(map[string]any) @@ -199,7 +199,7 @@ func (a *vectorSearchIndexesPreviewImpl) QueryNextPage(ctx context.Context, requ return &queryVectorIndexResponse, err } -func (a *vectorSearchIndexesPreviewImpl) ScanIndex(ctx context.Context, request ScanVectorIndexRequest) (*ScanVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) ScanIndex(ctx context.Context, request ScanVectorIndexRequest) (*ScanVectorIndexResponse, error) { var scanVectorIndexResponse ScanVectorIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/scan", request.IndexName) queryParams := make(map[string]any) @@ -210,7 +210,7 @@ func (a *vectorSearchIndexesPreviewImpl) ScanIndex(ctx context.Context, request return &scanVectorIndexResponse, err } -func (a *vectorSearchIndexesPreviewImpl) SyncIndex(ctx context.Context, request SyncIndexRequest) error { +func (a *vectorSearchIndexesImpl) SyncIndex(ctx context.Context, request 
SyncIndexRequest) error { var syncIndexResponse SyncIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/sync", request.IndexName) queryParams := make(map[string]any) @@ -219,7 +219,7 @@ func (a *vectorSearchIndexesPreviewImpl) SyncIndex(ctx context.Context, request return err } -func (a *vectorSearchIndexesPreviewImpl) UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) { +func (a *vectorSearchIndexesImpl) UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) { var upsertDataVectorIndexResponse UpsertDataVectorIndexResponse path := fmt.Sprintf("/api/2.0preview/vector-search/indexes/%v/upsert-data", request.IndexName) queryParams := make(map[string]any) diff --git a/workspace/v2preview/api.go b/workspace/v2preview/api.go index 77e0d0a35..5ea9b5020 100755 --- a/workspace/v2preview/api.go +++ b/workspace/v2preview/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Git Credentials Preview, Repos Preview, Secrets Preview, Workspace Preview, etc. +// These APIs allow you to manage Git Credentials, Repos, Secrets, Workspace, etc. package workspacepreview import ( @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -type GitCredentialsPreviewInterface interface { +type GitCredentialsInterface interface { // Create a credential entry. // @@ -58,7 +58,7 @@ type GitCredentialsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context) ([]CredentialInfo, error) - // CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsPreviewAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. + // CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. // // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. // @@ -67,7 +67,7 @@ type GitCredentialsPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. CredentialInfoGitProviderToCredentialIdMap(ctx context.Context) (map[string]int64, error) - // GetByGitProvider calls [GitCredentialsPreviewAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. + // GetByGitProvider calls [GitCredentialsAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. // // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. // @@ -82,9 +82,9 @@ type GitCredentialsPreviewInterface interface { Update(ctx context.Context, request UpdateCredentialsRequest) error } -func NewGitCredentialsPreview(client *client.DatabricksClient) *GitCredentialsPreviewAPI { - return &GitCredentialsPreviewAPI{ - gitCredentialsPreviewImpl: gitCredentialsPreviewImpl{ +func NewGitCredentials(client *client.DatabricksClient) *GitCredentialsAPI { + return &GitCredentialsAPI{ + gitCredentialsImpl: gitCredentialsImpl{ client: client, }, } @@ -96,15 +96,15 @@ func NewGitCredentialsPreview(client *client.DatabricksClient) *GitCredentialsPr // See [more info]. 
// // [more info]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html -type GitCredentialsPreviewAPI struct { - gitCredentialsPreviewImpl +type GitCredentialsAPI struct { + gitCredentialsImpl } // Delete a credential. // // Deletes the specified Git credential. -func (a *GitCredentialsPreviewAPI) DeleteByCredentialId(ctx context.Context, credentialId int64) error { - return a.gitCredentialsPreviewImpl.Delete(ctx, DeleteCredentialsRequest{ +func (a *GitCredentialsAPI) DeleteByCredentialId(ctx context.Context, credentialId int64) error { + return a.gitCredentialsImpl.Delete(ctx, DeleteCredentialsRequest{ CredentialId: credentialId, }) } @@ -112,20 +112,20 @@ func (a *GitCredentialsPreviewAPI) DeleteByCredentialId(ctx context.Context, cre // Get a credential entry. // // Gets the Git credential with the specified credential ID. -func (a *GitCredentialsPreviewAPI) GetByCredentialId(ctx context.Context, credentialId int64) (*GetCredentialsResponse, error) { - return a.gitCredentialsPreviewImpl.Get(ctx, GetCredentialsRequest{ +func (a *GitCredentialsAPI) GetByCredentialId(ctx context.Context, credentialId int64) (*GetCredentialsResponse, error) { + return a.gitCredentialsImpl.Get(ctx, GetCredentialsRequest{ CredentialId: credentialId, }) } -// CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsPreviewAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. +// CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. // // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. // // Note: All [CredentialInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *GitCredentialsPreviewAPI) CredentialInfoGitProviderToCredentialIdMap(ctx context.Context) (map[string]int64, error) { +func (a *GitCredentialsAPI) CredentialInfoGitProviderToCredentialIdMap(ctx context.Context) (map[string]int64, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]int64{} result, err := a.ListAll(ctx) @@ -143,14 +143,14 @@ func (a *GitCredentialsPreviewAPI) CredentialInfoGitProviderToCredentialIdMap(ct return mapping, nil } -// GetByGitProvider calls [GitCredentialsPreviewAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. +// GetByGitProvider calls [GitCredentialsAPI.CredentialInfoGitProviderToCredentialIdMap] and returns a single [CredentialInfo]. // // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. // // Note: All [CredentialInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *GitCredentialsPreviewAPI) GetByGitProvider(ctx context.Context, name string) (*CredentialInfo, error) { +func (a *GitCredentialsAPI) GetByGitProvider(ctx context.Context, name string) (*CredentialInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx) if err != nil { @@ -171,7 +171,7 @@ func (a *GitCredentialsPreviewAPI) GetByGitProvider(ctx context.Context, name st return &alternatives[0], nil } -type ReposPreviewInterface interface { +type ReposInterface interface { // Create a repo. 
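A quick sketch of the renamed name-to-id convenience lookups (assumes a constructed GitCredentialsClient; "gitHub" is one of the provider strings the API accepts):

	func gitHubCredentialID(ctx context.Context, gc *GitCredentialsClient) (int64, error) {
		// GetByGitProvider loads all credentials into memory and matches on GitProvider.
		cred, err := gc.GetByGitProvider(ctx, "gitHub")
		if err != nil {
			return 0, err
		}
		return cred.CredentialId, nil
	}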
// @@ -238,7 +238,7 @@ type ReposPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) - // RepoInfoPathToIdMap calls [ReposPreviewAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. + // RepoInfoPathToIdMap calls [ReposAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. // // Returns an error if there's more than one [RepoInfo] with the same .Path. // @@ -247,7 +247,7 @@ type ReposPreviewInterface interface { // This method is generated by Databricks SDK Code Generator. RepoInfoPathToIdMap(ctx context.Context, request ListReposRequest) (map[string]int64, error) - // GetByPath calls [ReposPreviewAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. + // GetByPath calls [ReposAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. // // Returns an error if there's more than one [RepoInfo] with the same .Path. // @@ -276,9 +276,9 @@ type ReposPreviewInterface interface { UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) } -func NewReposPreview(client *client.DatabricksClient) *ReposPreviewAPI { - return &ReposPreviewAPI{ - reposPreviewImpl: reposPreviewImpl{ +func NewRepos(client *client.DatabricksClient) *ReposAPI { + return &ReposAPI{ + reposImpl: reposImpl{ client: client, }, } @@ -294,15 +294,15 @@ func NewReposPreview(client *client.DatabricksClient) *ReposPreviewAPI { // Within Repos you can develop code in notebooks or other files and follow data // science and engineering code development best practices using Git for version // control, collaboration, and CI/CD. -type ReposPreviewAPI struct { - reposPreviewImpl +type ReposAPI struct { + reposImpl } // Delete a repo. // // Deletes the specified repo. -func (a *ReposPreviewAPI) DeleteByRepoId(ctx context.Context, repoId int64) error { - return a.reposPreviewImpl.Delete(ctx, DeleteRepoRequest{ +func (a *ReposAPI) DeleteByRepoId(ctx context.Context, repoId int64) error { + return a.reposImpl.Delete(ctx, DeleteRepoRequest{ RepoId: repoId, }) } @@ -310,8 +310,8 @@ func (a *ReposPreviewAPI) DeleteByRepoId(ctx context.Context, repoId int64) erro // Get a repo. // // Returns the repo with the given repo ID. -func (a *ReposPreviewAPI) GetByRepoId(ctx context.Context, repoId int64) (*GetRepoResponse, error) { - return a.reposPreviewImpl.Get(ctx, GetRepoRequest{ +func (a *ReposAPI) GetByRepoId(ctx context.Context, repoId int64) (*GetRepoResponse, error) { + return a.reposImpl.Get(ctx, GetRepoRequest{ RepoId: repoId, }) } @@ -319,8 +319,8 @@ func (a *ReposPreviewAPI) GetByRepoId(ctx context.Context, repoId int64) (*GetRe // Get repo permission levels. // // Gets the permission levels that a user can have on an object. -func (a *ReposPreviewAPI) GetPermissionLevelsByRepoId(ctx context.Context, repoId string) (*GetRepoPermissionLevelsResponse, error) { - return a.reposPreviewImpl.GetPermissionLevels(ctx, GetRepoPermissionLevelsRequest{ +func (a *ReposAPI) GetPermissionLevelsByRepoId(ctx context.Context, repoId string) (*GetRepoPermissionLevelsResponse, error) { + return a.reposImpl.GetPermissionLevels(ctx, GetRepoPermissionLevelsRequest{ RepoId: repoId, }) } @@ -329,20 +329,20 @@ func (a *ReposPreviewAPI) GetPermissionLevelsByRepoId(ctx context.Context, repoI // // Gets the permissions of a repo. Repos can inherit permissions from their root // object. 
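The repos wrappers compose the same way; a sketch assuming a constructed ReposClient (the workspace path argument is a placeholder):

	func deleteRepoAtPath(ctx context.Context, rc *ReposClient, path string) error {
		// GetByPath resolves a workspace path to a RepoInfo by listing all repos.
		repo, err := rc.GetByPath(ctx, path)
		if err != nil {
			return err
		}
		return rc.DeleteByRepoId(ctx, repo.Id)
	}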
-func (a *ReposPreviewAPI) GetPermissionsByRepoId(ctx context.Context, repoId string) (*RepoPermissions, error) { - return a.reposPreviewImpl.GetPermissions(ctx, GetRepoPermissionsRequest{ +func (a *ReposAPI) GetPermissionsByRepoId(ctx context.Context, repoId string) (*RepoPermissions, error) { + return a.reposImpl.GetPermissions(ctx, GetRepoPermissionsRequest{ RepoId: repoId, }) } -// RepoInfoPathToIdMap calls [ReposPreviewAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. +// RepoInfoPathToIdMap calls [ReposAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. // // Returns an error if there's more than one [RepoInfo] with the same .Path. // // Note: All [RepoInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *ReposPreviewAPI) RepoInfoPathToIdMap(ctx context.Context, request ListReposRequest) (map[string]int64, error) { +func (a *ReposAPI) RepoInfoPathToIdMap(ctx context.Context, request ListReposRequest) (map[string]int64, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]int64{} result, err := a.ListAll(ctx, request) @@ -360,14 +360,14 @@ func (a *ReposPreviewAPI) RepoInfoPathToIdMap(ctx context.Context, request ListR return mapping, nil } -// GetByPath calls [ReposPreviewAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. +// GetByPath calls [ReposAPI.RepoInfoPathToIdMap] and returns a single [RepoInfo]. // // Returns an error if there's more than one [RepoInfo] with the same .Path. // // Note: All [RepoInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *ReposPreviewAPI) GetByPath(ctx context.Context, name string) (*RepoInfo, error) { +func (a *ReposAPI) GetByPath(ctx context.Context, name string) (*RepoInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListReposRequest{}) if err != nil { @@ -388,7 +388,7 @@ func (a *ReposPreviewAPI) GetByPath(ctx context.Context, name string) (*RepoInfo return &alternatives[0], nil } -type SecretsPreviewInterface interface { +type SecretsInterface interface { // Create a new secret scope. // @@ -608,9 +608,9 @@ type SecretsPreviewInterface interface { PutSecret(ctx context.Context, request PutSecret) error } -func NewSecretsPreview(client *client.DatabricksClient) *SecretsPreviewAPI { - return &SecretsPreviewAPI{ - secretsPreviewImpl: secretsPreviewImpl{ +func NewSecrets(client *client.DatabricksClient) *SecretsAPI { + return &SecretsAPI{ + secretsImpl: secretsImpl{ client: client, }, } @@ -628,8 +628,8 @@ func NewSecretsPreview(client *client.DatabricksClient) *SecretsPreviewAPI { // Databricks secrets. While Databricks makes an effort to redact secret values // that might be displayed in notebooks, it is not possible to prevent such // users from reading secrets. -type SecretsPreviewAPI struct { - secretsPreviewImpl +type SecretsAPI struct { + secretsImpl } // Delete a secret scope. @@ -639,8 +639,8 @@ type SecretsPreviewAPI struct { // Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. 
-func (a *SecretsPreviewAPI) DeleteScopeByScope(ctx context.Context, scope string) error { - return a.secretsPreviewImpl.DeleteScope(ctx, DeleteScope{ +func (a *SecretsAPI) DeleteScopeByScope(ctx context.Context, scope string) error { + return a.secretsImpl.DeleteScope(ctx, DeleteScope{ Scope: scope, }) } @@ -653,8 +653,8 @@ func (a *SecretsPreviewAPI) DeleteScopeByScope(ctx context.Context, scope string // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. -func (a *SecretsPreviewAPI) ListAclsByScope(ctx context.Context, scope string) (*ListAclsResponse, error) { - return a.secretsPreviewImpl.internalListAcls(ctx, ListAclsRequest{ +func (a *SecretsAPI) ListAclsByScope(ctx context.Context, scope string) (*ListAclsResponse, error) { + return a.secretsImpl.internalListAcls(ctx, ListAclsRequest{ Scope: scope, }) } @@ -669,13 +669,14 @@ func (a *SecretsPreviewAPI) ListAclsByScope(ctx context.Context, scope string) ( // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. -func (a *SecretsPreviewAPI) ListSecretsByScope(ctx context.Context, scope string) (*ListSecretsResponse, error) { - return a.secretsPreviewImpl.internalListSecrets(ctx, ListSecretsRequest{ +func (a *SecretsAPI) ListSecretsByScope(ctx context.Context, scope string) (*ListSecretsResponse, error) { + return a.secretsImpl.internalListSecrets(ctx, ListSecretsRequest{ Scope: scope, }) } -type WorkspacePreviewInterface interface { +type WorkspaceInterface interface { + workspaceAPIUtilities // Delete a workspace object. // @@ -763,7 +764,7 @@ type WorkspacePreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) - // ObjectInfoPathToObjectIdMap calls [WorkspacePreviewAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. + // ObjectInfoPathToObjectIdMap calls [WorkspaceAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. // // Returns an error if there's more than one [ObjectInfo] with the same .Path. // @@ -772,7 +773,7 @@ type WorkspacePreviewInterface interface { // This method is generated by Databricks SDK Code Generator. ObjectInfoPathToObjectIdMap(ctx context.Context, request ListWorkspaceRequest) (map[string]int64, error) - // GetByPath calls [WorkspacePreviewAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. + // GetByPath calls [WorkspaceAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. // // Returns an error if there's more than one [ObjectInfo] with the same .Path. 
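Against the error contract documented above, a tolerant-listing sketch for the secrets wrappers (assumes a constructed SecretsClient; apierr is the SDK's databricks/apierr package, whose IsMissing matches RESOURCE_DOES_NOT_EXIST):

	func secretKeys(ctx context.Context, sc *SecretsClient, scope string) ([]string, error) {
		resp, err := sc.ListSecretsByScope(ctx, scope)
		if apierr.IsMissing(err) {
			// treat a missing scope as empty rather than as a failure
			return nil, nil
		}
		if err != nil {
			return nil, err
		}
		keys := make([]string, 0, len(resp.Secrets))
		for _, m := range resp.Secrets {
			keys = append(keys, m.Key)
		}
		return keys, nil
	}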
// @@ -815,9 +816,9 @@ type WorkspacePreviewInterface interface { UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) } -func NewWorkspacePreview(client *client.DatabricksClient) *WorkspacePreviewAPI { - return &WorkspacePreviewAPI{ - workspacePreviewImpl: workspacePreviewImpl{ +func NewWorkspace(client *client.DatabricksClient) *WorkspaceAPI { + return &WorkspaceAPI{ + workspaceImpl: workspaceImpl{ client: client, }, } @@ -828,15 +829,15 @@ func NewWorkspacePreview(client *client.DatabricksClient) *WorkspacePreviewAPI { // // A notebook is a web-based interface to a document that contains runnable // code, visualizations, and explanatory text. -type WorkspacePreviewAPI struct { - workspacePreviewImpl +type WorkspaceAPI struct { + workspaceImpl } // Get workspace object permission levels. // // Gets the permission levels that a user can have on an object. -func (a *WorkspacePreviewAPI) GetPermissionLevelsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*GetWorkspaceObjectPermissionLevelsResponse, error) { - return a.workspacePreviewImpl.GetPermissionLevels(ctx, GetWorkspaceObjectPermissionLevelsRequest{ +func (a *WorkspaceAPI) GetPermissionLevelsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*GetWorkspaceObjectPermissionLevelsResponse, error) { + return a.workspaceImpl.GetPermissionLevels(ctx, GetWorkspaceObjectPermissionLevelsRequest{ WorkspaceObjectType: workspaceObjectType, WorkspaceObjectId: workspaceObjectId, }) @@ -846,8 +847,8 @@ func (a *WorkspacePreviewAPI) GetPermissionLevelsByWorkspaceObjectTypeAndWorkspa // // Gets the permissions of a workspace object. Workspace objects can inherit // permissions from their parent objects or root object. -func (a *WorkspacePreviewAPI) GetPermissionsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*WorkspaceObjectPermissions, error) { - return a.workspacePreviewImpl.GetPermissions(ctx, GetWorkspaceObjectPermissionsRequest{ +func (a *WorkspaceAPI) GetPermissionsByWorkspaceObjectTypeAndWorkspaceObjectId(ctx context.Context, workspaceObjectType string, workspaceObjectId string) (*WorkspaceObjectPermissions, error) { + return a.workspaceImpl.GetPermissions(ctx, GetWorkspaceObjectPermissionsRequest{ WorkspaceObjectType: workspaceObjectType, WorkspaceObjectId: workspaceObjectId, }) @@ -857,20 +858,20 @@ func (a *WorkspacePreviewAPI) GetPermissionsByWorkspaceObjectTypeAndWorkspaceObj // // Gets the status of an object or a directory. If `path` does not exist, this // call returns an error `RESOURCE_DOES_NOT_EXIST`. -func (a *WorkspacePreviewAPI) GetStatusByPath(ctx context.Context, path string) (*ObjectInfo, error) { - return a.workspacePreviewImpl.GetStatus(ctx, GetStatusRequest{ +func (a *WorkspaceAPI) GetStatusByPath(ctx context.Context, path string) (*ObjectInfo, error) { + return a.workspaceImpl.GetStatus(ctx, GetStatusRequest{ Path: path, }) } -// ObjectInfoPathToObjectIdMap calls [WorkspacePreviewAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. +// ObjectInfoPathToObjectIdMap calls [WorkspaceAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. // // Returns an error if there's more than one [ObjectInfo] with the same .Path. 
// // Note: All [ObjectInfo] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. -func (a *WorkspacePreviewAPI) ObjectInfoPathToObjectIdMap(ctx context.Context, request ListWorkspaceRequest) (map[string]int64, error) { +func (a *WorkspaceAPI) ObjectInfoPathToObjectIdMap(ctx context.Context, request ListWorkspaceRequest) (map[string]int64, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]int64{} result, err := a.ListAll(ctx, request) @@ -888,14 +889,14 @@ func (a *WorkspacePreviewAPI) ObjectInfoPathToObjectIdMap(ctx context.Context, r return mapping, nil } -// GetByPath calls [WorkspacePreviewAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. +// GetByPath calls [WorkspaceAPI.ObjectInfoPathToObjectIdMap] and returns a single [ObjectInfo]. // // Returns an error if there's more than one [ObjectInfo] with the same .Path. // // Note: All [ObjectInfo] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. -func (a *WorkspacePreviewAPI) GetByPath(ctx context.Context, name string) (*ObjectInfo, error) { +func (a *WorkspaceAPI) GetByPath(ctx context.Context, name string) (*ObjectInfo, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAll(ctx, ListWorkspaceRequest{}) if err != nil { @@ -924,8 +925,8 @@ func (a *WorkspacePreviewAPI) GetByPath(ctx context.Context, name string) (*Obje // // Note that if this operation fails it may have succeeded in creating some of // the necessary parent directories. -func (a *WorkspacePreviewAPI) MkdirsByPath(ctx context.Context, path string) error { - return a.workspacePreviewImpl.Mkdirs(ctx, Mkdirs{ +func (a *WorkspaceAPI) MkdirsByPath(ctx context.Context, path string) error { + return a.workspaceImpl.Mkdirs(ctx, Mkdirs{ Path: path, }) } diff --git a/workspace/v2preview/client.go b/workspace/v2preview/client.go index 778d6675a..f3f4fa12c 100755 --- a/workspace/v2preview/client.go +++ b/workspace/v2preview/client.go @@ -10,13 +10,13 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/httpclient" ) -type GitCredentialsPreviewClient struct { - GitCredentialsPreviewInterface +type GitCredentialsClient struct { + GitCredentialsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewGitCredentialsPreviewClient(cfg *config.Config) (*GitCredentialsPreviewClient, error) { +func NewGitCredentialsClient(cfg *config.Config) (*GitCredentialsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -37,20 +37,20 @@ func NewGitCredentialsPreviewClient(cfg *config.Config) (*GitCredentialsPreviewC return nil, err } - return &GitCredentialsPreviewClient{ - Config: cfg, - apiClient: apiClient, - GitCredentialsPreviewInterface: NewGitCredentialsPreview(databricksClient), + return &GitCredentialsClient{ + Config: cfg, + apiClient: apiClient, + GitCredentialsInterface: NewGitCredentials(databricksClient), }, nil } -type ReposPreviewClient struct { - ReposPreviewInterface +type ReposClient struct { + ReposInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewReposPreviewClient(cfg *config.Config) (*ReposPreviewClient, error) { +func NewReposClient(cfg *config.Config) (*ReposClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -71,20 +71,20 @@ func NewReposPreviewClient(cfg *config.Config) (*ReposPreviewClient, error) { return nil, err } - return &ReposPreviewClient{ - Config: cfg, - 
apiClient: apiClient, - ReposPreviewInterface: NewReposPreview(databricksClient), + return &ReposClient{ + Config: cfg, + apiClient: apiClient, + ReposInterface: NewRepos(databricksClient), }, nil } -type SecretsPreviewClient struct { - SecretsPreviewInterface +type SecretsClient struct { + SecretsInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewSecretsPreviewClient(cfg *config.Config) (*SecretsPreviewClient, error) { +func NewSecretsClient(cfg *config.Config) (*SecretsClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -105,20 +105,20 @@ func NewSecretsPreviewClient(cfg *config.Config) (*SecretsPreviewClient, error) return nil, err } - return &SecretsPreviewClient{ - Config: cfg, - apiClient: apiClient, - SecretsPreviewInterface: NewSecretsPreview(databricksClient), + return &SecretsClient{ + Config: cfg, + apiClient: apiClient, + SecretsInterface: NewSecrets(databricksClient), }, nil } -type WorkspacePreviewClient struct { - WorkspacePreviewInterface +type WorkspaceClient struct { + WorkspaceInterface Config *config.Config apiClient *httpclient.ApiClient } -func NewWorkspacePreviewClient(cfg *config.Config) (*WorkspacePreviewClient, error) { +func NewWorkspaceClient(cfg *config.Config) (*WorkspaceClient, error) { if cfg == nil { cfg = &config.Config{} } @@ -139,9 +139,9 @@ func NewWorkspacePreviewClient(cfg *config.Config) (*WorkspacePreviewClient, err return nil, err } - return &WorkspacePreviewClient{ - Config: cfg, - apiClient: apiClient, - WorkspacePreviewInterface: NewWorkspacePreview(databricksClient), + return &WorkspaceClient{ + Config: cfg, + apiClient: apiClient, + WorkspaceInterface: NewWorkspace(databricksClient), }, nil } diff --git a/workspace/v2preview/impl.go b/workspace/v2preview/impl.go index 1b28cd3dd..1cea89ba6 100755 --- a/workspace/v2preview/impl.go +++ b/workspace/v2preview/impl.go @@ -12,12 +12,12 @@ import ( "github.com/databricks/databricks-sdk-go/databricks/useragent" ) -// unexported type that holds implementations of just GitCredentialsPreview API methods -type gitCredentialsPreviewImpl struct { +// unexported type that holds implementations of just GitCredentials API methods +type gitCredentialsImpl struct { client *client.DatabricksClient } -func (a *gitCredentialsPreviewImpl) Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error) { +func (a *gitCredentialsImpl) Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error) { var createCredentialsResponse CreateCredentialsResponse path := "/api/2.0preview/git-credentials" queryParams := make(map[string]any) @@ -28,7 +28,7 @@ func (a *gitCredentialsPreviewImpl) Create(ctx context.Context, request CreateCr return &createCredentialsResponse, err } -func (a *gitCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteCredentialsRequest) error { +func (a *gitCredentialsImpl) Delete(ctx context.Context, request DeleteCredentialsRequest) error { var deleteCredentialsResponse DeleteCredentialsResponse path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) queryParams := make(map[string]any) @@ -38,7 +38,7 @@ func (a *gitCredentialsPreviewImpl) Delete(ctx context.Context, request DeleteCr return err } -func (a *gitCredentialsPreviewImpl) Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error) { +func (a *gitCredentialsImpl) Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error) { var getCredentialsResponse 
GetCredentialsResponse path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) queryParams := make(map[string]any) @@ -52,7 +52,7 @@ func (a *gitCredentialsPreviewImpl) Get(ctx context.Context, request GetCredenti // // Lists the calling user's Git credentials. One credential per user is // supported. -func (a *gitCredentialsPreviewImpl) List(ctx context.Context) listing.Iterator[CredentialInfo] { +func (a *gitCredentialsImpl) List(ctx context.Context) listing.Iterator[CredentialInfo] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListCredentialsResponse, error) { @@ -75,11 +75,11 @@ func (a *gitCredentialsPreviewImpl) List(ctx context.Context) listing.Iterator[C // // Lists the calling user's Git credentials. One credential per user is // supported. -func (a *gitCredentialsPreviewImpl) ListAll(ctx context.Context) ([]CredentialInfo, error) { +func (a *gitCredentialsImpl) ListAll(ctx context.Context) ([]CredentialInfo, error) { iterator := a.List(ctx) return listing.ToSlice[CredentialInfo](ctx, iterator) } -func (a *gitCredentialsPreviewImpl) internalList(ctx context.Context) (*ListCredentialsResponse, error) { +func (a *gitCredentialsImpl) internalList(ctx context.Context) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.0preview/git-credentials" @@ -89,7 +89,7 @@ func (a *gitCredentialsPreviewImpl) internalList(ctx context.Context) (*ListCred return &listCredentialsResponse, err } -func (a *gitCredentialsPreviewImpl) Update(ctx context.Context, request UpdateCredentialsRequest) error { +func (a *gitCredentialsImpl) Update(ctx context.Context, request UpdateCredentialsRequest) error { var updateCredentialsResponse UpdateCredentialsResponse path := fmt.Sprintf("/api/2.0preview/git-credentials/%v", request.CredentialId) queryParams := make(map[string]any) @@ -100,12 +100,12 @@ func (a *gitCredentialsPreviewImpl) Update(ctx context.Context, request UpdateCr return err } -// unexported type that holds implementations of just ReposPreview API methods -type reposPreviewImpl struct { +// unexported type that holds implementations of just Repos API methods +type reposImpl struct { client *client.DatabricksClient } -func (a *reposPreviewImpl) Create(ctx context.Context, request CreateRepoRequest) (*CreateRepoResponse, error) { +func (a *reposImpl) Create(ctx context.Context, request CreateRepoRequest) (*CreateRepoResponse, error) { var createRepoResponse CreateRepoResponse path := "/api/2.0preview/repos" queryParams := make(map[string]any) @@ -116,7 +116,7 @@ func (a *reposPreviewImpl) Create(ctx context.Context, request CreateRepoRequest return &createRepoResponse, err } -func (a *reposPreviewImpl) Delete(ctx context.Context, request DeleteRepoRequest) error { +func (a *reposImpl) Delete(ctx context.Context, request DeleteRepoRequest) error { var deleteRepoResponse DeleteRepoResponse path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -126,7 +126,7 @@ func (a *reposPreviewImpl) Delete(ctx context.Context, request DeleteRepoRequest return err } -func (a *reposPreviewImpl) Get(ctx context.Context, request GetRepoRequest) (*GetRepoResponse, error) { +func (a *reposImpl) Get(ctx context.Context, request GetRepoRequest) (*GetRepoResponse, error) { var getRepoResponse GetRepoResponse path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -136,7 +136,7 @@ func (a *reposPreviewImpl) Get(ctx 
context.Context, request GetRepoRequest) (*Ge return &getRepoResponse, err } -func (a *reposPreviewImpl) GetPermissionLevels(ctx context.Context, request GetRepoPermissionLevelsRequest) (*GetRepoPermissionLevelsResponse, error) { +func (a *reposImpl) GetPermissionLevels(ctx context.Context, request GetRepoPermissionLevelsRequest) (*GetRepoPermissionLevelsResponse, error) { var getRepoPermissionLevelsResponse GetRepoPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v/permissionLevels", request.RepoId) queryParams := make(map[string]any) @@ -146,7 +146,7 @@ func (a *reposPreviewImpl) GetPermissionLevels(ctx context.Context, request GetR return &getRepoPermissionLevelsResponse, err } -func (a *reposPreviewImpl) GetPermissions(ctx context.Context, request GetRepoPermissionsRequest) (*RepoPermissions, error) { +func (a *reposImpl) GetPermissions(ctx context.Context, request GetRepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -160,7 +160,7 @@ func (a *reposPreviewImpl) GetPermissions(ctx context.Context, request GetRepoPe // // Returns repos that the calling user has Manage permissions on. Use // `next_page_token` to iterate through additional pages. -func (a *reposPreviewImpl) List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] { +func (a *reposImpl) List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] { getNextPage := func(ctx context.Context, req ListReposRequest) (*ListReposResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -188,11 +188,11 @@ func (a *reposPreviewImpl) List(ctx context.Context, request ListReposRequest) l // // Returns repos that the calling user has Manage permissions on. Use // `next_page_token` to iterate through additional pages. 
-func (a *reposPreviewImpl) ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) { +func (a *reposImpl) ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[RepoInfo](ctx, iterator) } -func (a *reposPreviewImpl) internalList(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { +func (a *reposImpl) internalList(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { var listReposResponse ListReposResponse path := "/api/2.0preview/repos" queryParams := make(map[string]any) @@ -202,7 +202,7 @@ func (a *reposPreviewImpl) internalList(ctx context.Context, request ListReposRe return &listReposResponse, err } -func (a *reposPreviewImpl) SetPermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { +func (a *reposImpl) SetPermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -213,7 +213,7 @@ func (a *reposPreviewImpl) SetPermissions(ctx context.Context, request RepoPermi return &repoPermissions, err } -func (a *reposPreviewImpl) Update(ctx context.Context, request UpdateRepoRequest) error { +func (a *reposImpl) Update(ctx context.Context, request UpdateRepoRequest) error { var updateRepoResponse UpdateRepoResponse path := fmt.Sprintf("/api/2.0preview/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -224,7 +224,7 @@ func (a *reposPreviewImpl) Update(ctx context.Context, request UpdateRepoRequest return err } -func (a *reposPreviewImpl) UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { +func (a *reposImpl) UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0preview/permissions/repos/%v", request.RepoId) queryParams := make(map[string]any) @@ -235,12 +235,12 @@ func (a *reposPreviewImpl) UpdatePermissions(ctx context.Context, request RepoPe return &repoPermissions, err } -// unexported type that holds implementations of just SecretsPreview API methods -type secretsPreviewImpl struct { +// unexported type that holds implementations of just Secrets API methods +type secretsImpl struct { client *client.DatabricksClient } -func (a *secretsPreviewImpl) CreateScope(ctx context.Context, request CreateScope) error { +func (a *secretsImpl) CreateScope(ctx context.Context, request CreateScope) error { var createScopeResponse CreateScopeResponse path := "/api/2.0preview/secrets/scopes/create" queryParams := make(map[string]any) @@ -251,7 +251,7 @@ func (a *secretsPreviewImpl) CreateScope(ctx context.Context, request CreateScop return err } -func (a *secretsPreviewImpl) DeleteAcl(ctx context.Context, request DeleteAcl) error { +func (a *secretsImpl) DeleteAcl(ctx context.Context, request DeleteAcl) error { var deleteAclResponse DeleteAclResponse path := "/api/2.0preview/secrets/acls/delete" queryParams := make(map[string]any) @@ -262,7 +262,7 @@ func (a *secretsPreviewImpl) DeleteAcl(ctx context.Context, request DeleteAcl) e return err } -func (a *secretsPreviewImpl) DeleteScope(ctx context.Context, request DeleteScope) error { +func (a *secretsImpl) DeleteScope(ctx context.Context, request DeleteScope) error { var deleteScopeResponse DeleteScopeResponse path := 
"/api/2.0preview/secrets/scopes/delete" queryParams := make(map[string]any) @@ -273,7 +273,7 @@ func (a *secretsPreviewImpl) DeleteScope(ctx context.Context, request DeleteScop return err } -func (a *secretsPreviewImpl) DeleteSecret(ctx context.Context, request DeleteSecret) error { +func (a *secretsImpl) DeleteSecret(ctx context.Context, request DeleteSecret) error { var deleteSecretResponse DeleteSecretResponse path := "/api/2.0preview/secrets/delete" queryParams := make(map[string]any) @@ -284,7 +284,7 @@ func (a *secretsPreviewImpl) DeleteSecret(ctx context.Context, request DeleteSec return err } -func (a *secretsPreviewImpl) GetAcl(ctx context.Context, request GetAclRequest) (*AclItem, error) { +func (a *secretsImpl) GetAcl(ctx context.Context, request GetAclRequest) (*AclItem, error) { var aclItem AclItem path := "/api/2.0preview/secrets/acls/get" queryParams := make(map[string]any) @@ -294,7 +294,7 @@ func (a *secretsPreviewImpl) GetAcl(ctx context.Context, request GetAclRequest) return &aclItem, err } -func (a *secretsPreviewImpl) GetSecret(ctx context.Context, request GetSecretRequest) (*GetSecretResponse, error) { +func (a *secretsImpl) GetSecret(ctx context.Context, request GetSecretRequest) (*GetSecretResponse, error) { var getSecretResponse GetSecretResponse path := "/api/2.0preview/secrets/get" queryParams := make(map[string]any) @@ -312,7 +312,7 @@ func (a *secretsPreviewImpl) GetSecret(ctx context.Context, request GetSecretReq // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. -func (a *secretsPreviewImpl) ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] { +func (a *secretsImpl) ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] { getNextPage := func(ctx context.Context, req ListAclsRequest) (*ListAclsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -338,11 +338,11 @@ func (a *secretsPreviewImpl) ListAcls(ctx context.Context, request ListAclsReque // Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. -func (a *secretsPreviewImpl) ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) { +func (a *secretsImpl) ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) { iterator := a.ListAcls(ctx, request) return listing.ToSlice[AclItem](ctx, iterator) } -func (a *secretsPreviewImpl) internalListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { +func (a *secretsImpl) internalListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { var listAclsResponse ListAclsResponse path := "/api/2.0preview/secrets/acls/list" queryParams := make(map[string]any) @@ -358,7 +358,7 @@ func (a *secretsPreviewImpl) internalListAcls(ctx context.Context, request ListA // // Throws `PERMISSION_DENIED` if the user does not have permission to make this // API call. 
-func (a *secretsPreviewImpl) ListScopes(ctx context.Context) listing.Iterator[SecretScope] { +func (a *secretsImpl) ListScopes(ctx context.Context) listing.Iterator[SecretScope] { request := struct{}{} getNextPage := func(ctx context.Context, req struct{}) (*ListScopesResponse, error) { @@ -383,11 +383,11 @@ func (a *secretsPreviewImpl) ListScopes(ctx context.Context) listing.Iterator[Se // // Throws `PERMISSION_DENIED` if the user does not have permission to make this // API call. -func (a *secretsPreviewImpl) ListScopesAll(ctx context.Context) ([]SecretScope, error) { +func (a *secretsImpl) ListScopesAll(ctx context.Context) ([]SecretScope, error) { iterator := a.ListScopes(ctx) return listing.ToSlice[SecretScope](ctx, iterator) } -func (a *secretsPreviewImpl) internalListScopes(ctx context.Context) (*ListScopesResponse, error) { +func (a *secretsImpl) internalListScopes(ctx context.Context) (*ListScopesResponse, error) { var listScopesResponse ListScopesResponse path := "/api/2.0preview/secrets/scopes/list" @@ -407,7 +407,7 @@ func (a *secretsPreviewImpl) internalListScopes(ctx context.Context) (*ListScope // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. -func (a *secretsPreviewImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] { +func (a *secretsImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] { getNextPage := func(ctx context.Context, req ListSecretsRequest) (*ListSecretsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -435,11 +435,11 @@ func (a *secretsPreviewImpl) ListSecrets(ctx context.Context, request ListSecret // `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws // `PERMISSION_DENIED` if the user does not have permission to make this API // call. 
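Since PutSecret and GetSecret surface here as plain request/response calls, a round-trip sketch may help (assumes a constructed SecretsClient; scope, key, and value are placeholders):

	func putAndReadBack(ctx context.Context, sc *SecretsClient) (string, error) {
		err := sc.PutSecret(ctx, PutSecret{Scope: "demo", Key: "token", StringValue: "s3cr3t"})
		if err != nil {
			return "", err
		}
		resp, err := sc.GetSecret(ctx, GetSecretRequest{Scope: "demo", Key: "token"})
		if err != nil {
			return "", err
		}
		// note: the service returns the value base64-encoded
		return resp.Value, nil
	}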
-func (a *secretsPreviewImpl) ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) { +func (a *secretsImpl) ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) { iterator := a.ListSecrets(ctx, request) return listing.ToSlice[SecretMetadata](ctx, iterator) } -func (a *secretsPreviewImpl) internalListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { +func (a *secretsImpl) internalListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { var listSecretsResponse ListSecretsResponse path := "/api/2.0preview/secrets/list" queryParams := make(map[string]any) @@ -449,7 +449,7 @@ func (a *secretsPreviewImpl) internalListSecrets(ctx context.Context, request Li return &listSecretsResponse, err } -func (a *secretsPreviewImpl) PutAcl(ctx context.Context, request PutAcl) error { +func (a *secretsImpl) PutAcl(ctx context.Context, request PutAcl) error { var putAclResponse PutAclResponse path := "/api/2.0preview/secrets/acls/put" queryParams := make(map[string]any) @@ -460,7 +460,7 @@ func (a *secretsPreviewImpl) PutAcl(ctx context.Context, request PutAcl) error { return err } -func (a *secretsPreviewImpl) PutSecret(ctx context.Context, request PutSecret) error { +func (a *secretsImpl) PutSecret(ctx context.Context, request PutSecret) error { var putSecretResponse PutSecretResponse path := "/api/2.0preview/secrets/put" queryParams := make(map[string]any) @@ -471,12 +471,12 @@ func (a *secretsPreviewImpl) PutSecret(ctx context.Context, request PutSecret) e return err } -// unexported type that holds implementations of just WorkspacePreview API methods -type workspacePreviewImpl struct { +// unexported type that holds implementations of just Workspace API methods +type workspaceImpl struct { client *client.DatabricksClient } -func (a *workspacePreviewImpl) Delete(ctx context.Context, request Delete) error { +func (a *workspaceImpl) Delete(ctx context.Context, request Delete) error { var deleteResponse DeleteResponse path := "/api/2.0preview/workspace/delete" queryParams := make(map[string]any) @@ -487,7 +487,7 @@ func (a *workspacePreviewImpl) Delete(ctx context.Context, request Delete) error return err } -func (a *workspacePreviewImpl) Export(ctx context.Context, request ExportRequest) (*ExportResponse, error) { +func (a *workspaceImpl) Export(ctx context.Context, request ExportRequest) (*ExportResponse, error) { var exportResponse ExportResponse path := "/api/2.0preview/workspace/export" queryParams := make(map[string]any) @@ -497,7 +497,7 @@ func (a *workspacePreviewImpl) Export(ctx context.Context, request ExportRequest return &exportResponse, err } -func (a *workspacePreviewImpl) GetPermissionLevels(ctx context.Context, request GetWorkspaceObjectPermissionLevelsRequest) (*GetWorkspaceObjectPermissionLevelsResponse, error) { +func (a *workspaceImpl) GetPermissionLevels(ctx context.Context, request GetWorkspaceObjectPermissionLevelsRequest) (*GetWorkspaceObjectPermissionLevelsResponse, error) { var getWorkspaceObjectPermissionLevelsResponse GetWorkspaceObjectPermissionLevelsResponse path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v/permissionLevels", request.WorkspaceObjectType, request.WorkspaceObjectId) queryParams := make(map[string]any) @@ -507,7 +507,7 @@ func (a *workspacePreviewImpl) GetPermissionLevels(ctx context.Context, request return &getWorkspaceObjectPermissionLevelsResponse, err } -func (a *workspacePreviewImpl) GetPermissions(ctx 
context.Context, request GetWorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { +func (a *workspaceImpl) GetPermissions(ctx context.Context, request GetWorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) queryParams := make(map[string]any) @@ -517,7 +517,7 @@ func (a *workspacePreviewImpl) GetPermissions(ctx context.Context, request GetWo return &workspaceObjectPermissions, err } -func (a *workspacePreviewImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error) { +func (a *workspaceImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error) { var objectInfo ObjectInfo path := "/api/2.0preview/workspace/get-status" queryParams := make(map[string]any) @@ -527,7 +527,7 @@ func (a *workspacePreviewImpl) GetStatus(ctx context.Context, request GetStatusR return &objectInfo, err } -func (a *workspacePreviewImpl) Import(ctx context.Context, request Import) error { +func (a *workspaceImpl) Import(ctx context.Context, request Import) error { var importResponse ImportResponse path := "/api/2.0preview/workspace/import" queryParams := make(map[string]any) @@ -543,7 +543,7 @@ func (a *workspacePreviewImpl) Import(ctx context.Context, request Import) error // Lists the contents of a directory, or the object if it is not a directory. If // the input path does not exist, this call returns an error // `RESOURCE_DOES_NOT_EXIST`. -func (a *workspacePreviewImpl) List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] { +func (a *workspaceImpl) List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] { getNextPage := func(ctx context.Context, req ListWorkspaceRequest) (*ListResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") @@ -566,11 +566,11 @@ func (a *workspacePreviewImpl) List(ctx context.Context, request ListWorkspaceRe // Lists the contents of a directory, or the object if it is not a directory. If // the input path does not exist, this call returns an error // `RESOURCE_DOES_NOT_EXIST`. 
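A final sketch for the workspace tree wrappers (assumes a constructed WorkspaceClient; the directory path is a placeholder, and ObjectTypeNotebook is the generated NOTEBOOK constant):

	func notebookPaths(ctx context.Context, wc *WorkspaceClient, dir string) ([]string, error) {
		objs, err := wc.ListAll(ctx, ListWorkspaceRequest{Path: dir})
		if err != nil {
			return nil, err // includes RESOURCE_DOES_NOT_EXIST for a bad path
		}
		var out []string
		for _, o := range objs {
			if o.ObjectType == ObjectTypeNotebook {
				out = append(out, o.Path)
			}
		}
		return out, nil
	}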
-func (a *workspacePreviewImpl) ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) { +func (a *workspaceImpl) ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) { iterator := a.List(ctx, request) return listing.ToSlice[ObjectInfo](ctx, iterator) } -func (a *workspacePreviewImpl) internalList(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) { +func (a *workspaceImpl) internalList(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0preview/workspace/list" queryParams := make(map[string]any) @@ -580,7 +580,7 @@ func (a *workspacePreviewImpl) internalList(ctx context.Context, request ListWor return &listResponse, err } -func (a *workspacePreviewImpl) Mkdirs(ctx context.Context, request Mkdirs) error { +func (a *workspaceImpl) Mkdirs(ctx context.Context, request Mkdirs) error { var mkdirsResponse MkdirsResponse path := "/api/2.0preview/workspace/mkdirs" queryParams := make(map[string]any) @@ -591,7 +591,7 @@ func (a *workspacePreviewImpl) Mkdirs(ctx context.Context, request Mkdirs) error return err } -func (a *workspacePreviewImpl) SetPermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { +func (a *workspaceImpl) SetPermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) queryParams := make(map[string]any) @@ -602,7 +602,7 @@ func (a *workspacePreviewImpl) SetPermissions(ctx context.Context, request Works return &workspaceObjectPermissions, err } -func (a *workspacePreviewImpl) UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { +func (a *workspaceImpl) UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0preview/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) queryParams := make(map[string]any) From e55938da3e23ff0048310dc90b9d94260a15d40c Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Feb 2025 12:43:27 +0000 Subject: [PATCH 3/5] copy utilities --- compute/v2preview/ext_commands.go | 42 ++ compute/v2preview/ext_leading_whitespace.go | 36 ++ .../v2preview/ext_leading_whitespace_test.go | 16 + compute/v2preview/ext_library_utilities.go | 243 ++++++++++++ compute/v2preview/ext_node_type.go | 129 ++++++ compute/v2preview/ext_node_type_test.go | 206 ++++++++++ compute/v2preview/ext_results.go | 100 +++++ compute/v2preview/ext_results_test.go | 50 +++ compute/v2preview/ext_sort.go | 49 +++ compute/v2preview/ext_spark_version.go | 104 +++++ compute/v2preview/ext_utilities.go | 11 + files/v2preview/ext_utilities.go | 354 +++++++++++++++++ jobs/v2preview/ext_api.go | 34 ++ jobs/v2preview/ext_api_test.go | 371 ++++++++++++++++++ provisioning/v2preview/ext_azure.go | 13 + provisioning/v2preview/ext_azure_test.go | 25 ++ serving/v2preview/ext_data_plane.go | 85 ++++ serving/v2preview/ext_data_plane_test.go | 173 ++++++++ sql/v2preview/ext_utilities.go | 59 +++ workspace/v2preview/ext_utilities.go | 290 ++++++++++++++ 20 files changed, 2390 insertions(+) create mode 100644 compute/v2preview/ext_commands.go 
create mode 100644 compute/v2preview/ext_leading_whitespace.go
create mode 100644 compute/v2preview/ext_leading_whitespace_test.go
create mode 100644 compute/v2preview/ext_library_utilities.go
create mode 100644 compute/v2preview/ext_node_type.go
create mode 100644 compute/v2preview/ext_node_type_test.go
create mode 100644 compute/v2preview/ext_results.go
create mode 100644 compute/v2preview/ext_results_test.go
create mode 100644 compute/v2preview/ext_sort.go
create mode 100644 compute/v2preview/ext_spark_version.go
create mode 100644 compute/v2preview/ext_utilities.go
create mode 100644 files/v2preview/ext_utilities.go
create mode 100644 jobs/v2preview/ext_api.go
create mode 100644 jobs/v2preview/ext_api_test.go
create mode 100644 provisioning/v2preview/ext_azure.go
create mode 100644 provisioning/v2preview/ext_azure_test.go
create mode 100644 serving/v2preview/ext_data_plane.go
create mode 100644 serving/v2preview/ext_data_plane_test.go
create mode 100644 sql/v2preview/ext_utilities.go
create mode 100644 workspace/v2preview/ext_utilities.go

diff --git a/compute/v2preview/ext_commands.go b/compute/v2preview/ext_commands.go
new file mode 100644
index 000000000..a6da1e781
--- /dev/null
+++ b/compute/v2preview/ext_commands.go
@@ -0,0 +1,42 @@
+// TODO: Add and implement the missing methods.
+// This file has not been fully migrated from the SDK-Beta,
+// as the wait-for-state methods are not yet available in the SDK-mod.
+package computepreview
+
+import (
+	"context"
+)
+
+type CommandExecutorV2 struct {
+	executionAPI *CommandExecutionAPI
+	clusterID    string
+	contextID    string
+}
+
+type commandExecutionAPIUtilities interface {
+	// Start(ctx context.Context, clusterID string, language Language) (*CommandExecutorV2, error)
+}
+
+// Destroy closes the command execution context on the cluster.
+func (c *CommandExecutorV2) Destroy(ctx context.Context) error {
+	return c.executionAPI.Destroy(ctx, DestroyContext{
+		ClusterId: c.clusterID,
+		ContextId: c.contextID,
+	})
+}
+
+// CommandExecutor creates a Spark context, executes a command, and then closes the context.
+type CommandExecutor interface {
+	Execute(ctx context.Context, clusterID, language, commandStr string) Results
+}
+
+// CommandMock mocks the execution of a command.
+type CommandMock func(commandStr string) Results
+
+func (m CommandMock) Execute(_ context.Context, _, _, commandStr string) Results {
+	return m(commandStr)
+}
+
+// CommandsHighLevelAPI exposes a friendlier wrapper over command execution.
+type CommandsHighLevelAPI struct {
+}

diff --git a/compute/v2preview/ext_leading_whitespace.go b/compute/v2preview/ext_leading_whitespace.go
new file mode 100644
index 000000000..7c27fa3da
--- /dev/null
+++ b/compute/v2preview/ext_leading_whitespace.go
@@ -0,0 +1,36 @@
+package computepreview
+
+import (
+	"strings"
+)
+
+// TrimLeadingWhitespace removes the common leading whitespace, so that Python
+// code blocks embedded into Go code can still be interpreted properly.
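+// For example (illustrative):
+//
+//	TrimLeadingWhitespace("\n\tfoo\n\tbar\n") // returns "foo\nbar\n"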
+func TrimLeadingWhitespace(commandStr string) (newCommand string) {
+	lines := strings.Split(strings.ReplaceAll(commandStr, "\t", " "), "\n")
+	leadingWhitespace := 1<<31 - 1
+	for _, line := range lines {
+		for pos, char := range line {
+			if char == ' ' || char == '\t' {
+				continue
+			}
+			// first non-whitespace character
+			if pos < leadingWhitespace {
+				leadingWhitespace = pos
+			}
+			// no need to scan the rest of this line
+			break
+		}
+	}
+	for i := 0; i < len(lines); i++ {
+		if lines[i] == "" || strings.Trim(lines[i], " \t") == "" {
+			continue
+		}
+		if len(lines[i]) < leadingWhitespace {
+			// line is shorter than the common indent; keep it as-is
+			newCommand += lines[i] + "\n"
+		} else {
+			newCommand += lines[i][leadingWhitespace:] + "\n"
+		}
+	}
+	return
+}

diff --git a/compute/v2preview/ext_leading_whitespace_test.go b/compute/v2preview/ext_leading_whitespace_test.go
new file mode 100644
index 000000000..484af50a6
--- /dev/null
+++ b/compute/v2preview/ext_leading_whitespace_test.go
@@ -0,0 +1,16 @@
+package computepreview
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTrimLeadingWhitespace(t *testing.T) {
+	assert.Equal(t, "foo\nbar\n", TrimLeadingWhitespace(`
+
+		foo
+		bar
+
+	`))
+}

diff --git a/compute/v2preview/ext_library_utilities.go b/compute/v2preview/ext_library_utilities.go
new file mode 100644
index 000000000..3edb0339c
--- /dev/null
+++ b/compute/v2preview/ext_library_utilities.go
@@ -0,0 +1,243 @@
+package computepreview
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/databricks/databricks-sdk-go/databricks/apierr"
+	"github.com/databricks/databricks-sdk-go/databricks/log"
+	"github.com/databricks/databricks-sdk-go/databricks/retries"
+	"github.com/databricks/databricks-sdk-go/databricks/useragent"
+)
+
+type Wait struct {
+	ClusterID string
+	Libraries []Library
+	IsRunning bool
+	IsRefresh bool
+}
+
+func (library Library) String() string {
+	if library.Whl != "" {
+		return fmt.Sprintf("whl:%s", library.Whl)
+	}
+	if library.Jar != "" {
+		return fmt.Sprintf("jar:%s", library.Jar)
+	}
+	if library.Pypi != nil && library.Pypi.Package != "" {
+		return fmt.Sprintf("pypi:%s%s", library.Pypi.Repo, library.Pypi.Package)
+	}
+	if library.Maven != nil && library.Maven.Coordinates != "" {
+		mvn := library.Maven
+		return fmt.Sprintf("mvn:%s%s%s", mvn.Repo, mvn.Coordinates,
+			strings.Join(mvn.Exclusions, ""))
+	}
+	if library.Egg != "" {
+		return fmt.Sprintf("egg:%s", library.Egg)
+	}
+	if library.Cran != nil && library.Cran.Package != "" {
+		return fmt.Sprintf("cran:%s%s", library.Cran.Repo, library.Cran.Package)
+	}
+	return "unknown"
+}
+
+func (cll *InstallLibraries) Sort() {
+	sort.Slice(cll.Libraries, func(i, j int) bool {
+		return cll.Libraries[i].String() < cll.Libraries[j].String()
+	})
+}
+
+// ToLibraryList converts to an entity for convenient comparison
+func (cls ClusterLibraryStatuses) ToLibraryList() InstallLibraries {
+	cll := InstallLibraries{ClusterId: cls.ClusterId}
+	for _, lib := range cls.LibraryStatuses {
+		cll.Libraries = append(cll.Libraries, *lib.Library)
+	}
+	cll.Sort()
+	return cll
+}
+
+func (w *Wait) IsNotInScope(lib *Library) bool {
+	// if we don't know concrete libraries
+	if len(w.Libraries) == 0 {
+		return false
+	}
+	// if we know concrete libraries
+	for _, v := range w.Libraries {
+		if v.String() == lib.String() {
+			return false
+		}
+	}
+	return true
+}
+
+// IsRetryNeeded reports in its first return value whether a retry is needed.
+// If a retry is needed, the error message explains why.
+// If no retry is needed and the error is non-nil, the operation failed.
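+// For example (illustrative): one INSTALLED plus two PENDING libraries in scope
+// yields (true, "1 libraries are ready, but there are still 2 pending"); only
+// FAILED entries yield (false, the joined failure messages), unless IsRefresh
+// is set, in which case failures on a running cluster are skipped.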
+// If retry does not need to happen and error is not nil - it failed. +func (cls ClusterLibraryStatuses) IsRetryNeeded(w Wait) (bool, error) { + pending := 0 + ready := 0 + errors := []string{} + for _, lib := range cls.LibraryStatuses { + if lib.IsLibraryForAllClusters { + continue + } + if w.IsNotInScope(lib.Library) { + continue + } + switch lib.Status { + // No action has yet been taken to install the library. This state should be very short lived. + case "PENDING": + pending++ + // Metadata necessary to install the library is being retrieved from the provided repository. + case "RESOLVING": + pending++ + // The library is actively being installed, either by adding resources to Spark + // or executing system commands inside the Spark nodes. + case "INSTALLING": + pending++ + // The library has been successfully installed. + case "INSTALLED": + ready++ + // Installation on a Databricks Runtime 7.0 or above cluster was skipped due to Scala version incompatibility. + case "SKIPPED": + ready++ + // The library has been marked for removal. Libraries can be removed only when clusters are restarted. + case "UNINSTALL_ON_RESTART": + ready++ + //Some step in installation failed. More information can be found in the messages field. + case "FAILED": + if w.IsRefresh { + // we're reading library list on a running cluster and some of the libs failed to install + continue + } + errors = append(errors, fmt.Sprintf("%s failed: %s", lib.Library, strings.Join(lib.Messages, ", "))) + continue + } + } + if pending > 0 { + return true, fmt.Errorf("%d libraries are ready, but there are still %d pending", ready, pending) + } + if len(errors) > 0 { + return false, fmt.Errorf("%s", strings.Join(errors, "\n")) + } + return false, nil +} + +type Update struct { + ClusterId string + // The libraries to install. + Install []Library + // The libraries to install. + Uninstall []Library +} + +type librariesAPIUtilities interface { + UpdateAndWait(ctx context.Context, update Update, options ...retries.Option[ClusterLibraryStatuses]) error +} + +func (a *LibrariesAPI) UpdateAndWait(ctx context.Context, update Update, + options ...retries.Option[ClusterLibraryStatuses]) error { + ctx = useragent.InContext(ctx, "sdk-feature", "update-libraries") + if len(update.Uninstall) > 0 { + err := a.Uninstall(ctx, UninstallLibraries{ + ClusterId: update.ClusterId, + Libraries: update.Uninstall, + }) + if err != nil { + return fmt.Errorf("uninstall: %w", err) + } + } + if len(update.Install) > 0 { + err := a.Install(ctx, InstallLibraries{ + ClusterId: update.ClusterId, + Libraries: update.Install, + }) + if err != nil { + return fmt.Errorf("install: %w", err) + } + } + // this helps to avoid erroring out when out-of-list library gets added to + // the cluster manually and thereforce fails the wait on error + scope := make([]Library, len(update.Install)+len(update.Uninstall)) + scope = append(scope, update.Install...) + scope = append(scope, update.Uninstall...) + _, err := a.Wait(ctx, Wait{ + ClusterID: update.ClusterId, + Libraries: scope, + IsRunning: true, + IsRefresh: false, + }, options...) 
+ return err +} + +// clusterID string, timeout time.Duration, isActive bool, refresh bool +func (a *LibrariesAPI) Wait(ctx context.Context, wait Wait, + options ...retries.Option[ClusterLibraryStatuses]) (*ClusterLibraryStatuses, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "wait-for-libraries") + i := retries.Info[ClusterLibraryStatuses]{Timeout: 30 * time.Minute} + for _, o := range options { + o(&i) + } + result, err := retries.Poll(ctx, i.Timeout, func() (*ClusterLibraryStatuses, *retries.Err) { + status, err := a.ClusterStatusByClusterId(ctx, wait.ClusterID) + if apierr.IsMissing(err) { + // eventual consistency error + return nil, retries.Continue(err) + } + for _, o := range options { + o(&retries.Info[ClusterLibraryStatuses]{ + Timeout: i.Timeout, + Info: status, + }) + } + if err != nil { + return nil, retries.Halt(err) + } + if !wait.IsRunning { + log.InfoContext(ctx, "Cluster %s is currently not running, so just returning list of %d libraries", + wait.ClusterID, len(status.LibraryStatuses)) + return status, nil + } + retry, err := status.IsRetryNeeded(wait) + if retry { + return status, retries.Continue(err) + } + if err != nil { + return status, retries.Halt(err) + } + return status, nil + }) + if err != nil { + return nil, err + } + if wait.IsRunning { + installed := []LibraryFullStatus{} + cleanup := UninstallLibraries{ + ClusterId: wait.ClusterID, + Libraries: []Library{}, + } + // cleanup libraries that failed to install + for _, v := range result.LibraryStatuses { + if v.Status == "FAILED" { + log.WarningContext(ctx, "Removing failed library %s from %s", + v.Library, wait.ClusterID) + cleanup.Libraries = append(cleanup.Libraries, *v.Library) + continue + } + installed = append(installed, v) + } + // and result contains only the libraries that were successfully installed + result.LibraryStatuses = installed + if len(cleanup.Libraries) > 0 { + err = a.Uninstall(ctx, cleanup) + if err != nil { + return nil, fmt.Errorf("cannot cleanup libraries: %w", err) + } + } + } + return result, nil +} diff --git a/compute/v2preview/ext_node_type.go b/compute/v2preview/ext_node_type.go new file mode 100644 index 000000000..6b0a8b7b4 --- /dev/null +++ b/compute/v2preview/ext_node_type.go @@ -0,0 +1,129 @@ +package computepreview + +import ( + "context" + "fmt" + "strings" +) + +// NodeTypeRequest is a wrapper for local filtering of node types +type NodeTypeRequest struct { + Id string `json:"id,omitempty"` + MinMemoryGB int32 `json:"min_memory_gb,omitempty"` + GBPerCore int32 `json:"gb_per_core,omitempty"` + MinCores int32 `json:"min_cores,omitempty"` + MinGPUs int32 `json:"min_gpus,omitempty"` + LocalDisk bool `json:"local_disk,omitempty"` + LocalDiskMinSize int32 `json:"local_disk_min_size,omitempty"` + Category string `json:"category,omitempty"` + PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"` + PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"` + Graviton bool `json:"graviton,omitempty"` + IsIOCacheEnabled bool `json:"is_io_cache_enabled,omitempty"` + SupportPortForwarding bool `json:"support_port_forwarding,omitempty"` + Fleet bool `json:"fleet,omitempty"` +} + +// sort NodeTypes within this struct +func (ntl *ListNodeTypesResponse) sort() { + sortByChain(ntl.NodeTypes, func(i int) sortCmp { + var localDisks, localDiskSizeGB, localNVMeDisk, localNVMeDiskSizeGB int32 + if ntl.NodeTypes[i].NodeInstanceType != nil { + localDisks = int32(ntl.NodeTypes[i].NodeInstanceType.LocalDisks) + localNVMeDisk = 
int32(ntl.NodeTypes[i].NodeInstanceType.LocalNvmeDisks) + localDiskSizeGB = int32(ntl.NodeTypes[i].NodeInstanceType.LocalDiskSizeGb) + localNVMeDiskSizeGB = int32(ntl.NodeTypes[i].NodeInstanceType.LocalNvmeDiskSizeGb) + } + return sortChain{ + boolAsc(ntl.NodeTypes[i].IsDeprecated), + intAsc(ntl.NodeTypes[i].NumCores), + intAsc(ntl.NodeTypes[i].MemoryMb), + intAsc(localDisks), + intAsc(localDiskSizeGB), + intAsc(localNVMeDisk), + intAsc(localNVMeDiskSizeGB), + intAsc(ntl.NodeTypes[i].NumGpus), + strAsc(ntl.NodeTypes[i].InstanceTypeId), + } + }) +} + +func (nt NodeType) shouldBeSkipped() bool { + if nt.NodeInfo == nil { + return false + } + for _, st := range nt.NodeInfo.Status { + switch st { + case CloudProviderNodeStatusNotAvailableInRegion, CloudProviderNodeStatusNotEnabledOnSubscription: + return true + } + } + return false +} + +func (ntl *ListNodeTypesResponse) Smallest(r NodeTypeRequest) (string, error) { + // the error is explicitly ignored here, because Azure apparently returns + // too big of a JSON for Go to parse + if len(ntl.NodeTypes) == 0 { + return "", fmt.Errorf("cannot determine smallest node type with empty response") + } + ntl.sort() + for _, nt := range ntl.NodeTypes { + if nt.shouldBeSkipped() { + continue + } + gbs := int32(nt.MemoryMb / 1024) + if r.Fleet != strings.Contains(nt.NodeTypeId, "-fleet.") { + continue + } + if r.MinMemoryGB > 0 && gbs < r.MinMemoryGB { + continue + } + if r.GBPerCore > 0 && (gbs/int32(nt.NumCores)) < r.GBPerCore { + continue + } + if r.MinCores > 0 && int32(nt.NumCores) < r.MinCores { + continue + } + if (r.MinGPUs > 0 && int32(nt.NumGpus) < r.MinGPUs) || (r.MinGPUs == 0 && nt.NumGpus > 0) { + continue + } + if (r.LocalDisk || r.LocalDiskMinSize > 0) && nt.NodeInstanceType != nil && + (nt.NodeInstanceType.LocalDisks < 1 && + nt.NodeInstanceType.LocalNvmeDisks < 1) { + continue + } + if r.LocalDiskMinSize > 0 && nt.NodeInstanceType != nil && + (int32(nt.NodeInstanceType.LocalDiskSizeGb)+int32(nt.NodeInstanceType.LocalNvmeDiskSizeGb)) < r.LocalDiskMinSize { + continue + } + if r.Category != "" && !strings.EqualFold(nt.Category, r.Category) { + continue + } + if r.IsIOCacheEnabled && !nt.IsIoCacheEnabled { + continue + } + if r.SupportPortForwarding && !nt.SupportPortForwarding { + continue + } + if r.PhotonDriverCapable && !nt.PhotonDriverCapable { + continue + } + if r.PhotonWorkerCapable && !nt.PhotonWorkerCapable { + continue + } + if nt.IsGraviton != r.Graviton { + continue + } + return nt.NodeTypeId, nil + } + return "", fmt.Errorf("cannot determine smallest node type") +} + +func (a *ClustersAPI) SelectNodeType(ctx context.Context, r NodeTypeRequest) (string, error) { + nodeTypes, err := a.ListNodeTypes(ctx) + if err != nil { + return "", err + } + return nodeTypes.Smallest(r) +} diff --git a/compute/v2preview/ext_node_type_test.go b/compute/v2preview/ext_node_type_test.go new file mode 100644 index 000000000..e6cb25e6b --- /dev/null +++ b/compute/v2preview/ext_node_type_test.go @@ -0,0 +1,206 @@ +package computepreview + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNodeType(t *testing.T) { + lst := ListNodeTypesResponse{ + NodeTypes: []NodeType{ + { + NodeTypeId: "m-fleet.xlarge", + InstanceTypeId: "m-fleet.xlarge", + MemoryMb: 16384, + NumCores: 4, + }, + { + NodeTypeId: "Random_05", + MemoryMb: 1024, + NumCores: 32, + NodeInstanceType: &NodeInstanceType{ + LocalDisks: 3, + LocalDiskSizeGb: 100, + }, + }, + { + NodeTypeId: "Standard_L80s_v2", + MemoryMb: 655360, + NumCores: 80, + NodeInstanceType:
&NodeInstanceType{ + LocalDisks: 2, + InstanceTypeId: "Standard_L80s_v2", + LocalDiskSizeGb: 160, + LocalNvmeDisks: 1, + }, + }, + { + NodeTypeId: "Random_01", + MemoryMb: 8192, + NumCores: 8, + NodeInstanceType: &NodeInstanceType{ + InstanceTypeId: "_", + }, + }, + { + NodeTypeId: "Random_02", + MemoryMb: 8192, + NumCores: 8, + NumGpus: 2, + NodeInstanceType: &NodeInstanceType{ + InstanceTypeId: "_", + }, + }, + { + NodeTypeId: "Random_03", + MemoryMb: 8192, + NumCores: 8, + NumGpus: 1, + NodeInstanceType: &NodeInstanceType{ + InstanceTypeId: "_", + LocalNvmeDisks: 15, + LocalNvmeDiskSizeGb: 235, + }, + }, + { + NodeTypeId: "Random_04", + MemoryMb: 32000, + NumCores: 32, + IsDeprecated: true, + NodeInstanceType: &NodeInstanceType{ + LocalDisks: 2, + LocalDiskSizeGb: 20, + }, + }, + { + NodeTypeId: "Standard_F4s", + MemoryMb: 8192, + NumCores: 4, + NodeInstanceType: &NodeInstanceType{ + LocalDisks: 1, + LocalDiskSizeGb: 16, + LocalNvmeDisks: 0, + }, + }, + }, + } + nt, err := lst.Smallest(NodeTypeRequest{LocalDiskMinSize: 200, MinMemoryGB: 8, MinCores: 8, MinGPUs: 1}) + assert.NoError(t, err) + assert.Equal(t, "Random_03", nt) +} + +func TestNodeTypeCategory(t *testing.T) { + lst := ListNodeTypesResponse{ + NodeTypes: []NodeType{ + { + NodeTypeId: "Random_05", + InstanceTypeId: "Random_05", + MemoryMb: 1024, + NumCores: 32, + NodeInstanceType: &NodeInstanceType{ + LocalDisks: 3, + LocalDiskSizeGb: 100, + }, + }, + { + NodeTypeId: "Random_01", + InstanceTypeId: "Random_01", + MemoryMb: 8192, + NumCores: 8, + NodeInstanceType: &NodeInstanceType{ + InstanceTypeId: "_", + }, + Category: "Memory Optimized", + }, + { + NodeTypeId: "Random_02", + InstanceTypeId: "Random_02", + MemoryMb: 8192, + NumCores: 8, + Category: "Storage Optimized", + }, + }, + } + nt, err := lst.Smallest(NodeTypeRequest{Category: "Storage optimized"}) + assert.NoError(t, err) + assert.Equal(t, "Random_02", nt) +} + +func TestNodeTypeCategoryNotAvailable(t *testing.T) { + lst := ListNodeTypesResponse{ + NodeTypes: []NodeType{ + { + NodeTypeId: "Random_05", + InstanceTypeId: "Random_05", + MemoryMb: 1024, + NumCores: 32, + NodeInstanceType: &NodeInstanceType{ + LocalDisks: 3, + LocalDiskSizeGb: 100, + }, + }, + { + NodeTypeId: "Random_01", + InstanceTypeId: "Random_01", + MemoryMb: 8192, + NumCores: 8, + NodeInstanceType: &NodeInstanceType{ + InstanceTypeId: "_", + }, + NodeInfo: &CloudProviderNodeInfo{ + Status: []CloudProviderNodeStatus{ + CloudProviderNodeStatusNotAvailableInRegion, + CloudProviderNodeStatusNotEnabledOnSubscription}, + }, + Category: "Storage Optimized", + }, + { + NodeTypeId: "Random_02", + InstanceTypeId: "Random_02", + MemoryMb: 8192, + NumCores: 8, + Category: "Storage Optimized", + }, + }, + } + nt, err := lst.Smallest(NodeTypeRequest{Category: "Storage optimized"}) + assert.NoError(t, err) + assert.Equal(t, "Random_02", nt) +} + +func TestNodeTypeFleet(t *testing.T) { + lst := ListNodeTypesResponse{ + NodeTypes: []NodeType{ + { + NodeTypeId: "Random_05", + InstanceTypeId: "Random_05", + MemoryMb: 1024, + NumCores: 4, + }, + { + NodeTypeId: "m-fleet.xlarge", + InstanceTypeId: "m-fleet.xlarge", + MemoryMb: 16384, + NumCores: 4, + }, + { + NodeTypeId: "m-fleet.2xlarge", + InstanceTypeId: "m-fleet.2xlarge", + MemoryMb: 32768, + NumCores: 8, + }, + }, + } + nt, err := lst.Smallest(NodeTypeRequest{Fleet: true, MinCores: 8}) + assert.NoError(t, err) + assert.Equal(t, "m-fleet.2xlarge", nt) +} + +func TestNodeTypeEmptyList(t *testing.T) { + lst := ListNodeTypesResponse{ + NodeTypes: []NodeType{}, + } + _, 
err := lst.Smallest(NodeTypeRequest{Fleet: true}) + assert.ErrorContains(t, err, "cannot determine smallest node type with empty response") +} diff --git a/compute/v2preview/ext_results.go b/compute/v2preview/ext_results.go new file mode 100644 index 000000000..48d0e2c1e --- /dev/null +++ b/compute/v2preview/ext_results.go @@ -0,0 +1,100 @@ +package computepreview + +import ( + "errors" + "html" + "regexp" + "strings" +) + +var ( + // IPython's output prefixes + outRE = regexp.MustCompile(`Out\[[\d\s]+\]:\s`) + // HTML tags + tagRE = regexp.MustCompile(`<[^>]*>`) + // just exception content without exception name + exceptionRE = regexp.MustCompile(`.*Exception:\s+(.*)`) + // execution errors resulting from http errors are sometimes hidden in these keys + executionErrorRE = regexp.MustCompile(`ExecutionError: ([\s\S]*)\n(StatusCode=[0-9]*)\n(StatusDescription=.*)\n`) + // usual error message explanation is hidden in this key + errorMessageRE = regexp.MustCompile(`ErrorMessage=(.+)\n`) +) + +// Failed tells if command execution failed +func (r *Results) Failed() bool { + return r.ResultType == "error" +} + +// Text returns plain text results +func (r *Results) Text() string { + if r.ResultType != "text" { + return "" + } + return outRE.ReplaceAllLiteralString(r.Data.(string), "") +} + +// Err returns error type +func (r *Results) Err() error { + if !r.Failed() { + return nil + } + return errors.New(r.Error()) +} + +// Error returns error in a bit more friendly way +func (r *Results) Error() string { + if r.ResultType != "error" { + return "" + } + summary := tagRE.ReplaceAllLiteralString(r.Summary, "") + summary = html.UnescapeString(summary) + + exceptionMatches := exceptionRE.FindStringSubmatch(summary) + if len(exceptionMatches) == 2 { + summary = strings.ReplaceAll(exceptionMatches[1], "; nested exception is:", "") + summary = strings.TrimRight(summary, " ") + return summary + } + + executionErrorMatches := executionErrorRE.FindStringSubmatch(r.Cause) + if len(executionErrorMatches) == 4 { + return strings.Join(executionErrorMatches[1:], "\n") + } + + errorMessageMatches := errorMessageRE.FindStringSubmatch(r.Cause) + if len(errorMessageMatches) == 2 { + return errorMessageMatches[1] + } + + return summary +} + +// Scan scans for results +// TODO: change API, also in terraform (databricks_sql_permissions) +// for now we're adding `pos` field artificially. this must be removed +// before this repo is public. 
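+// A minimal usage sketch (hypothetical caller; `res` is assumed to be a *Results +// holding a "table" result with string, int and bool columns, as in the test below): +// +//	var name string +//	var id int +//	var flag bool +//	for res.Scan(&name, &id, &flag) { +//		fmt.Println(name, id, flag) +//	}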
+func (r *Results) Scan(dest ...any) bool { + if r.ResultType != ResultTypeTable { + return false + } + if rows, ok := r.Data.([]any); ok { + if r.Pos >= len(rows) { + return false + } + if cols, ok := rows[r.Pos].([]any); ok { + for i := range dest { + switch d := dest[i].(type) { + case *string: + *d = cols[i].(string) + case *int: + *d = cols[i].(int) + case *bool: + *d = cols[i].(bool) + } + } + r.Pos++ + return true + } + } + return false +} diff --git a/compute/v2preview/ext_results_test.go b/compute/v2preview/ext_results_test.go new file mode 100644 index 000000000..7ed1d5197 --- /dev/null +++ b/compute/v2preview/ext_results_test.go @@ -0,0 +1,50 @@ +package computepreview + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResults_Error(t *testing.T) { + cr := Results{} + assert.NoError(t, cr.Err()) + cr.ResultType = "error" + assert.EqualError(t, cr.Err(), "") + + cr.Summary = "NotFoundException: Things are going wrong; nested exception is: with something" + assert.Equal(t, "Things are going wrong with something", cr.Error()) + + cr.Summary = "" + cr.Cause = "ExecutionError: \nStatusCode=400\nStatusDescription=ABC\nSomething else" + assert.Equal(t, "\nStatusCode=400\nStatusDescription=ABC", cr.Error()) + + cr.Cause = "ErrorMessage=Error was here\n" + assert.Equal(t, "Error was here", cr.Error()) + + assert.False(t, cr.Scan()) +} + +func TestResults_Scan(t *testing.T) { + cr := Results{ + ResultType: "table", + Data: []interface{}{ + []interface{}{"foo", 1, true}, + []interface{}{"bar", 2, false}, + }, + } + a := "" + b := 0 + c := false + assert.True(t, cr.Scan(&a, &b, &c)) + assert.Equal(t, "foo", a) + assert.Equal(t, 1, b) + assert.Equal(t, true, c) + + assert.True(t, cr.Scan(&a, &b, &c)) + assert.Equal(t, "bar", a) + assert.Equal(t, 2, b) + assert.Equal(t, false, c) + + assert.False(t, cr.Scan(&a, &b, &c)) +} diff --git a/compute/v2preview/ext_sort.go b/compute/v2preview/ext_sort.go new file mode 100644 index 000000000..7304942b9 --- /dev/null +++ b/compute/v2preview/ext_sort.go @@ -0,0 +1,49 @@ +package computepreview + +import ( + "sort" +) + +// readable chained sorting helper +func sortByChain(s interface{}, fn func(int) sortCmp) { + sort.Slice(s, func(i, j int) bool { + return fn(i).Less(fn(j)) + }) +} + +type sortCmp interface { + Less(o sortCmp) bool +} + +type boolAsc bool + +func (b boolAsc) Less(o sortCmp) bool { + return bool(b) != bool(o.(boolAsc)) && !bool(b) +} + +type intAsc int + +func (ia intAsc) Less(o sortCmp) bool { + return int(ia) < int(o.(intAsc)) +} + +type strAsc string + +func (s strAsc) Less(o sortCmp) bool { + return string(s) < string(o.(strAsc)) +} + +type sortChain []sortCmp + +func (c sortChain) Less(other sortCmp) bool { + o := other.(sortChain) + for i := range c { + if c[i].Less(o[i]) { + return true + } + if o[i].Less(c[i]) { + break + } + } + return false +} diff --git a/compute/v2preview/ext_spark_version.go b/compute/v2preview/ext_spark_version.go new file mode 100644 index 000000000..c2aa9c26e --- /dev/null +++ b/compute/v2preview/ext_spark_version.go @@ -0,0 +1,104 @@ +package computepreview + +import ( + "context" + "fmt" + "regexp" + "sort" + "strings" + + "golang.org/x/mod/semver" +) + +// SparkVersionRequest - filtering request +type SparkVersionRequest struct { + Id string `json:"id,omitempty"` + LongTermSupport bool `json:"long_term_support,omitempty" tf:"optional,default:false"` + Beta bool `json:"beta,omitempty" tf:"optional,default:false,conflicts:long_term_support"` + Latest bool 
`json:"latest,omitempty" tf:"optional,default:true"` + ML bool `json:"ml,omitempty" tf:"optional,default:false"` + Genomics bool `json:"genomics,omitempty" tf:"optional,default:false"` + GPU bool `json:"gpu,omitempty" tf:"optional,default:false"` + Scala string `json:"scala,omitempty" tf:"optional,default:2.12"` + SparkVersion string `json:"spark_version,omitempty" tf:"optional,default:"` + Photon bool `json:"photon,omitempty" tf:"optional,default:false"` + Graviton bool `json:"graviton,omitempty" tf:"optional,default:false"` +} + +type sparkVersionsType []string + +func (s sparkVersionsType) Len() int { + return len(s) +} +func (s sparkVersionsType) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +var dbrVersionRegex = regexp.MustCompile(`^(\d+\.\d+)\.x-.*`) + +func extractDbrVersions(s string) string { + m := dbrVersionRegex.FindStringSubmatch(s) + if len(m) > 1 { + return m[1] + } + return s +} + +func (s sparkVersionsType) Less(i, j int) bool { + return semver.Compare("v"+extractDbrVersions(s[i]), "v"+extractDbrVersions(s[j])) > 0 +} + +func (sv GetSparkVersionsResponse) Select(req SparkVersionRequest) (string, error) { + var versions []string + for _, version := range sv.Versions { + if strings.Contains(version.Key, "-scala"+req.Scala) { + matches := ((!strings.Contains(version.Key, "apache-spark-")) && + (strings.Contains(version.Key, "-ml-") == req.ML) && + (strings.Contains(version.Key, "-hls-") == req.Genomics) && + (strings.Contains(version.Key, "-gpu-") == req.GPU) && + (strings.Contains(version.Key, "-photon-") == req.Photon) && + (strings.Contains(version.Key, "-aarch64-") == req.Graviton) && + (strings.Contains(version.Name, "Beta") == req.Beta)) + if matches && req.LongTermSupport { + matches = (matches && (strings.Contains(version.Name, "LTS") || strings.Contains(version.Key, "-esr-"))) + } + if matches && len(req.SparkVersion) > 0 { + matches = (matches && strings.Contains(version.Name, "Apache Spark "+req.SparkVersion)) + } + if matches { + versions = append(versions, version.Key) + } + } + } + if len(versions) < 1 { + return "", fmt.Errorf("spark versions query returned no results. Please change your search criteria and try again") + } else if len(versions) > 1 { + if req.Latest { + sort.Sort(sparkVersionsType(versions)) + } else { + return "", fmt.Errorf("spark versions query returned multiple results %#v. Please change your search criteria and try again", versions) + } + } + return versions[0], nil +} + +// SelectSparkVersion returns latest DBR version matching the request parameters. +// If there are multiple versions matching the request, it will error (if latest = false) +// or return the latest version. 
+// Possible parameters are: +// - LongTermSupport: LTS versions only +// - Beta: Beta versions only +// - ML: ML versions only +// - Genomics: Genomics versions only +// - GPU: GPU versions only +// - Scala: Scala version +// - SparkVersion: Apache Spark version +// - Photon: Photon versions only (deprecated) +// - Graviton: Graviton versions only (deprecated) +func (a *ClustersAPI) SelectSparkVersion(ctx context.Context, r SparkVersionRequest) (string, error) { + sv, err := a.SparkVersions(ctx) + if err != nil { + return "", err + } + return sv.Select(r) +} diff --git a/compute/v2preview/ext_utilities.go b/compute/v2preview/ext_utilities.go new file mode 100644 index 000000000..5190682fa --- /dev/null +++ b/compute/v2preview/ext_utilities.go @@ -0,0 +1,11 @@ +// TODO: Add and implement the missing methods. +// This file has not been completely shifted from the SDK-Beta +// as we still don't have the wait-for-state methods in the SDK-mod +package computepreview + +type clustersAPIUtilities interface { +} + +func (c *ClusterDetails) IsRunningOrResizing() bool { + return c.State == StateRunning || c.State == StateResizing +} diff --git a/files/v2preview/ext_utilities.go b/files/v2preview/ext_utilities.go new file mode 100644 index 000000000..6109fb292 --- /dev/null +++ b/files/v2preview/ext_utilities.go @@ -0,0 +1,354 @@ +package filespreview + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + + "github.com/databricks/databricks-sdk-go/databricks/apierr" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +// FileMode conveys user intent when opening a file. +type FileMode int + +const ( + // Exactly one of FileModeRead or FileModeWrite must be specified. + FileModeRead FileMode = 1 << iota + FileModeWrite + FileModeOverwrite +) + +// Maximum read or write length for the DBFS API. +const maxDbfsBlockSize = 1024 * 1024 + +// Internal only state for a read handle. +type fileHandleReader struct { + size int64 + offset int64 +} + +func (f *fileHandleReader) errorf(format string, a ...any) error { + return fmt.Errorf("dbfs read: "+format, a...) +} + +func (f *fileHandleReader) error(err error) error { + if err == nil { + return nil + } + return f.errorf("%w", err) +} + +// Internal only state for a write handle. +type fileHandleWriter struct { + handle int64 +} + +func (f *fileHandleWriter) errorf(format string, a ...any) error { + return fmt.Errorf("dbfs write: "+format, a...) +} + +func (f *fileHandleWriter) error(err error) error { + if err == nil { + return nil + } + return f.errorf("%w", err) +} + +// Internal only state for a DBFS file handle. +type fileHandle struct { + ctx context.Context + api *DbfsAPI + path string + + reader *fileHandleReader + writer *fileHandleWriter +} + +func (h *fileHandle) checkRead() (*fileHandleReader, error) { + if h.reader != nil { + return h.reader, nil + } + return nil, fmt.Errorf("dbfs: file not open for reading") +} + +func (h *fileHandle) checkWrite() (*fileHandleWriter, error) { + if h.writer != nil { + return h.writer, nil + } + return nil, fmt.Errorf("dbfs: file not open for writing") +} + +// Handle defines the interface of the object returned by [DbfsAPI.Open]. +type Handle interface { + io.ReadWriteCloser + io.WriterTo + io.ReaderFrom +} + +// Implement the [io.Reader] interface.
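+// Read fetches the file in chunks of at most maxDbfsBlockSize (1 MiB); each chunk +// comes back base64-encoded from the DBFS read API and is decoded into p. A hedged +// usage sketch (assuming a configured *DbfsAPI named dbfs and a hypothetical path): +// +//	h, err := dbfs.Open(ctx, "/tmp/data.bin", FileModeRead) +//	if err != nil { +//		return err +//	} +//	buf := make([]byte, 4096) +//	n, err := h.Read(buf) // io.EOF signals the end of the file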
+func (h *fileHandle) Read(p []byte) (int, error) { + r, err := h.checkRead() + if err != nil { + return 0, err + } + + var ntotal int + for ntotal < len(p) { + if r.offset >= r.size { + return ntotal, io.EOF + } + + chunk := p[ntotal:] + if len(chunk) > maxDbfsBlockSize { + chunk = chunk[:maxDbfsBlockSize] + } + + res, err := h.api.Read(h.ctx, ReadDbfsRequest{ + Path: h.path, + Length: int64(len(chunk)), + Offset: r.offset, + }) + if err != nil { + return ntotal, r.error(err) + } + + // The guard against offset >= size happens above, so this can only happen + // if the file is modified or truncated while reading. If this happens, + // the read contents will likely be corrupted, so we return an error. + if res.BytesRead == 0 { + return ntotal, r.errorf("unexpected EOF at offset %d (size %d)", r.offset, r.size) + } + + nread, err := base64.StdEncoding.Decode(chunk, []byte(res.Data)) + if err != nil { + return ntotal, r.error(err) + } + + ntotal += nread + r.offset += int64(nread) + } + + return ntotal, nil +} + +// Implement the [io.Writer] interface. +func (h *fileHandle) Write(p []byte) (int, error) { + w, err := h.checkWrite() + if err != nil { + return 0, err + } + + var ntotal int + for ntotal < len(p) { + chunk := p[ntotal:] + if len(chunk) > maxDbfsBlockSize { + chunk = chunk[:maxDbfsBlockSize] + } + + err := h.api.AddBlock(h.ctx, AddBlock{ + Data: base64.StdEncoding.EncodeToString(chunk), + Handle: w.handle, + }) + if err != nil { + return ntotal, w.error(err) + } + + ntotal += len(chunk) + } + + return ntotal, nil +} + +// Implement the [io.Closer] interface. +func (h *fileHandle) Close() error { + w, err := h.checkWrite() + if err != nil { + return err + } + + return w.error(h.api.CloseByHandle(h.ctx, w.handle)) +} + +// Implement the [io.WriterTo] interface. +func (h *fileHandle) WriteTo(w io.Writer) (int64, error) { + _, err := h.checkRead() + if err != nil { + return 0, err + } + + // Limit types to io.Reader and io.Writer to avoid recursion + // into WriteTo or ReadFrom functions on underlying types. + ior := struct{ io.Reader }{h} + iow := struct{ io.Writer }{w} + return bufio.NewReaderSize(ior, maxDbfsBlockSize).WriteTo(iow) +} + +// Implement the [io.ReaderFrom] interface. +func (h *fileHandle) ReadFrom(r io.Reader) (int64, error) { + _, err := h.checkWrite() + if err != nil { + return 0, err + } + + // Limit types to io.Reader and io.Writer to avoid recursion + // into WriteTo or ReadFrom functions on underlying types. + ior := struct{ io.Reader }{r} + iow := struct{ io.Writer }{h} + bw := bufio.NewWriterSize(iow, maxDbfsBlockSize) + n, err := bw.ReadFrom(ior) + if err != nil { + return n, err + } + return n, bw.Flush() +} + +func (h *fileHandle) openForRead(mode FileMode) error { + res, err := h.api.GetStatusByPath(h.ctx, h.path) + if err != nil { + return err + } + if res.IsDir { + return fmt.Errorf("cannot open directory for reading") + } + h.reader = &fileHandleReader{ + size: res.FileSize, + } + return nil +} + +func (h *fileHandle) openForWrite(mode FileMode) error { + res, err := h.api.Create(h.ctx, Create{ + Path: h.path, + Overwrite: (mode & FileModeOverwrite) != 0, + }) + if err != nil { + return err + } + h.writer = &fileHandleWriter{ + handle: res.Handle, + } + return nil +} + +type dbfsAPIUtilities interface { + // Open opens a remote DBFS file for reading or writing. + // The returned object implements relevant [io] interfaces for convenient + // integration with other code that reads or writes bytes. 
+ // + // The [io.WriterTo] interface is provided and maximizes throughput for + // bulk reads by reading data with the DBFS maximum read chunk size of 1MB. + // Similarly, the [io.ReaderFrom] interface is provided for bulk writing. + // + // A file opened for writing must always be closed. + Open(ctx context.Context, path string, mode FileMode) (Handle, error) + + // ReadFile is identical to [os.ReadFile] but for DBFS. + ReadFile(ctx context.Context, name string) ([]byte, error) + + // WriteFile is identical to [os.WriteFile] but for DBFS. + WriteFile(ctx context.Context, name string, data []byte) error + + // RecursiveList traverses the DBFS tree and returns all non-directory + // objects under the path + RecursiveList(ctx context.Context, path string) ([]FileInfo, error) +} + +// Open opens a remote DBFS file for reading or writing. +// The returned object implements relevant [io] interfaces for convenient +// integration with other code that reads or writes bytes. +// +// The [io.WriterTo] interface is provided and maximizes throughput for +// bulk reads by reading data with the DBFS maximum read chunk size of 1MB. +// Similarly, the [io.ReaderFrom] interface is provided for bulk writing. +// +// A file opened for writing must always be closed. +func (a *DbfsAPI) Open(ctx context.Context, path string, mode FileMode) (Handle, error) { + h := &fileHandle{ + ctx: useragent.InContext(ctx, "sdk-feature", "dbfs-handle"), + api: a, + path: path, + } + + isRead := (mode & FileModeRead) != 0 + isWrite := (mode & FileModeWrite) != 0 + if (isRead && isWrite) || (!isRead && !isWrite) { + return nil, fmt.Errorf("dbfs open: must specify files.FileModeRead or files.FileModeWrite") + } + + var err error + if isRead { + err = h.openForRead(mode) + } + if isWrite { + err = h.openForWrite(mode) + } + if err != nil { + return nil, fmt.Errorf("dbfs open: %w", err) + } + + return h, nil +} + +// ReadFile is identical to [os.ReadFile] but for DBFS. +func (a *DbfsAPI) ReadFile(ctx context.Context, name string) ([]byte, error) { + h, err := a.Open(ctx, name, FileModeRead) + if err != nil { + return nil, err + } + + h_ := h.(*fileHandle) + buf := make([]byte, h_.reader.size) + _, err = h.Read(buf) + if err != nil && err != io.EOF { + return nil, err + } + return buf, nil +} + +// WriteFile is identical to [os.WriteFile] but for DBFS. 
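+// A hedged round-trip sketch (assuming a configured *DbfsAPI named dbfs and a +// hypothetical path): +// +//	if err := dbfs.WriteFile(ctx, "/tmp/hello.txt", []byte("hi")); err != nil { +//		return err +//	} +//	data, err := dbfs.ReadFile(ctx, "/tmp/hello.txt") // data == []byte("hi")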
+func (a *DbfsAPI) WriteFile(ctx context.Context, name string, data []byte) error { + h, err := a.Open(ctx, name, FileModeWrite|FileModeOverwrite) + if err != nil { + return err + } + + _, err = h.Write(data) + cerr := h.Close() + if err == nil && cerr != nil { + err = cerr + } + return err +} + +// RecursiveList traverses the DBFS tree and returns all non-directory +// objects under the path +func (a *DbfsAPI) RecursiveList(ctx context.Context, path string) ([]FileInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "recursive-list") + var results []FileInfo + queue := []string{path} + for len(queue) > 0 { + path := queue[0] + queue = queue[1:] + batch, err := a.ListAll(ctx, ListDbfsRequest{ + Path: path, + }) + if apierr.IsMissing(err) { + // skip on path deleted during iteration + continue + } + if err != nil { + return nil, fmt.Errorf("list %s: %w", path, err) + } + for _, v := range batch { + if v.IsDir { + queue = append(queue, v.Path) + continue + } + results = append(results, v) + } + } + return results, nil +} diff --git a/jobs/v2preview/ext_api.go b/jobs/v2preview/ext_api.go new file mode 100644 index 000000000..f2ba5bc99 --- /dev/null +++ b/jobs/v2preview/ext_api.go @@ -0,0 +1,34 @@ +package jobspreview + +import "context" + +// GetRun retrieves a run based on the provided request. +// It handles pagination if the run contains multiple iterations or tasks. +func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { + run, err := a.jobsImpl.GetRun(ctx, request) + if err != nil { + return nil, err + } + + // When querying a Job run, a page token is returned when there are more than 100 tasks. No iterations are defined for a Job run. Therefore, the next page in the response only includes the next page of tasks. + // When querying a ForEach task run, a page token is returned when there are more than 100 iterations. Only a single task is returned, corresponding to the ForEach task itself. Therefore, the client only reads the iterations from the next page and not the tasks. + isPaginatingIterations := len(run.Iterations) > 0 + + pageToken := run.NextPageToken + for pageToken != "" { + request.PageToken = pageToken + nextRun, err := a.jobsImpl.GetRun(ctx, request) + if err != nil { + return nil, err + } + + if isPaginatingIterations { + run.Iterations = append(run.Iterations, nextRun.Iterations...) + } else { + run.Tasks = append(run.Tasks, nextRun.Tasks...)
+ } + pageToken = nextRun.NextPageToken + } + + return run, nil +} diff --git a/jobs/v2preview/ext_api_test.go b/jobs/v2preview/ext_api_test.go new file mode 100644 index 000000000..c9d3b2921 --- /dev/null +++ b/jobs/v2preview/ext_api_test.go @@ -0,0 +1,371 @@ +package jobs + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/databricks/qa" + + "github.com/stretchr/testify/assert" +) + +func TestGetRun(t *testing.T) { + ctx := context.Background() + + t.Run("run with no pagination", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/get?run_id=514594995218126", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + TaskKey: "task1", + }, + { + RunId: 1234, + TaskKey: "task2", + }, + }, + NextPageToken: "", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/runs/get?run_id=514594995218126", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + TaskKey: "task1", + }, + { + RunId: 1234, + TaskKey: "task2", + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetRunRequest{RunId: 514594995218126} + run, err := api.GetRun(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 2, len(run.Tasks)) + assert.Empty(t, run.Iterations) + assert.EqualValues(t, 123, run.Tasks[0].RunId) + assert.EqualValues(t, 1234, run.Tasks[1].RunId) + }) + + t.Run("run with two tasks pages", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/runs/get?run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetRunRequest{RunId: 111222333} + run, err := api.GetRun(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 4, len(run.Tasks)) + assert.Empty(t, run.Iterations) + assert.Empty(t, run.NextPageToken) + expected := []RunTask{ + {RunId: 123, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 1234, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 222, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 333, ForceSendFields: []string{"RunId", "TaskKey"}}, + } + assert.Equal(t, expected, run.Tasks) + }) + + t.Run("clusters array is not increased 
when paginated", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/runs/get?run_id=111222333", + Response: Run{ + Iterations: []RunTask{}, + Tasks: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetRunRequest{RunId: 111222333} + run, err := api.GetRun(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 2, len(run.JobClusters)) + assert.Equal(t, "cluster1", run.JobClusters[0].JobClusterKey) + assert.Equal(t, "cluster2", run.JobClusters[1].JobClusterKey) + }) + + t.Run("run with two iterations pages", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?run_id=4444", + Response: Run{ + Iterations: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + }, + Tasks: []RunTask{ + { + RunId: 999, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=4444", + Response: Run{ + Iterations: []RunTask{ + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + Tasks: []RunTask{ + { + RunId: 999, + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/runs/get?run_id=4444", + Response: Run{ + Iterations: []RunTask{ + { + RunId: 123, + }, + { + RunId: 1234, + }, + { + RunId: 222, + }, + { + RunId: 333, + }, + }, + Tasks: []RunTask{ + { + RunId: 999, + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetRunRequest{RunId: 4444} + run, err := api.GetRun(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 4, len(run.Iterations)) + assert.Equal(t, 1, len(run.Tasks)) + assert.Empty(t, run.NextPageToken) + expected := []RunTask{ + {RunId: 123, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 1234, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 222, ForceSendFields: []string{"RunId", "TaskKey"}}, + {RunId: 333, ForceSendFields: []string{"RunId", "TaskKey"}}, + } + assert.Equal(t, expected, run.Iterations) + assert.EqualValues(t, 999, run.Tasks[0].RunId) + }) +} diff --git a/provisioning/v2preview/ext_azure.go b/provisioning/v2preview/ext_azure.go new file mode 100644 index 000000000..f50a63f12 --- /dev/null +++ b/provisioning/v2preview/ext_azure.go @@ 
-0,0 +1,13 @@ +package provisioningpreview + +import "fmt" + +const resourceIdFormat = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Databricks/workspaces/%s" + +// AzureResourceId returns the Azure resource ID for the workspace, if it is an Azure workspace. +func (w Workspace) AzureResourceId() string { + if w.AzureWorkspaceInfo == nil { + return "" + } + return fmt.Sprintf(resourceIdFormat, w.AzureWorkspaceInfo.SubscriptionId, w.AzureWorkspaceInfo.ResourceGroup, w.WorkspaceName) +} diff --git a/provisioning/v2preview/ext_azure_test.go b/provisioning/v2preview/ext_azure_test.go new file mode 100644 index 000000000..447efffb2 --- /dev/null +++ b/provisioning/v2preview/ext_azure_test.go @@ -0,0 +1,25 @@ +package provisioningpreview + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAzureResourceId_AzureWorkspace(t *testing.T) { + w := Workspace{ + WorkspaceName: "test", + AzureWorkspaceInfo: &AzureWorkspaceInfo{ + SubscriptionId: "sub", + ResourceGroup: "rg", + }, + } + assert.Equal(t, "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Databricks/workspaces/test", w.AzureResourceId()) +} + +func TestAzureResourceId_NonAzureWorkspace(t *testing.T) { + w := Workspace{ + WorkspaceName: "test", + } + assert.Equal(t, "", w.AzureResourceId()) +} diff --git a/serving/v2preview/ext_data_plane.go b/serving/v2preview/ext_data_plane.go new file mode 100644 index 000000000..cd5bbdf51 --- /dev/null +++ b/serving/v2preview/ext_data_plane.go @@ -0,0 +1,85 @@ +package servingpreview + +import ( + "strings" + "sync" + + goauth "golang.org/x/oauth2" +) + +// DataPlaneService is an interface for services that access DataPlane. +type DataPlaneService interface { + GetDataPlaneDetails(method string, params []string, refresh func(*DataPlaneInfo) (*goauth.Token, error), infoGetter func() (*DataPlaneInfo, error)) (string, *goauth.Token, error) +} + +func NewDataPlaneService() DataPlaneService { + return &dataPlaneServiceImpl{ + infos: make(map[string]*DataPlaneInfo), + tokens: make(map[string]*goauth.Token), + } +} + +type dataPlaneServiceImpl struct { + infos map[string]*DataPlaneInfo + tokens map[string]*goauth.Token + // This service can be shared across multiple goroutines. + // This mutex is used to synchronize access to the infos and tokens maps. + mu sync.Mutex +} + +// GetDataPlaneDetails returns the endpoint URL and token. It returns a cached token if it is valid, +// otherwise it refreshes the token and returns the new token.
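+// A hedged call sketch; endpointName, exchange and fetchInfo are hypothetical +// stand-ins for the service-specific parameters and callbacks: +// +//	svc := NewDataPlaneService() +//	url, tok, err := svc.GetDataPlaneDetails("query", []string{endpointName}, +//		func(info *DataPlaneInfo) (*goauth.Token, error) { return exchange(info) }, +//		func() (*DataPlaneInfo, error) { return fetchInfo() }, +//	)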
+func (o *dataPlaneServiceImpl) GetDataPlaneDetails(method string, params []string, refresh func(*DataPlaneInfo) (*goauth.Token, error), infoGetter func() (*DataPlaneInfo, error)) (string, *goauth.Token, error) { + key := o.generateKey(method, params) + info, err := o.getInfo(key, infoGetter) + if err != nil { + return "", nil, err + } + token, err := o.refreshToken(key, info, refresh) + if err != nil { + return "", nil, err + } + return info.EndpointUrl, token, nil +} + +func (o *dataPlaneServiceImpl) getInfo(key string, infoGetter func() (*DataPlaneInfo, error)) (*DataPlaneInfo, error) { + info, infoOk := o.infos[key] + if !infoOk { + o.mu.Lock() + defer o.mu.Unlock() + info, infoOk = o.infos[key] + if !infoOk { + newInfo, err := infoGetter() + if err != nil { + return nil, err + } + o.infos[key] = newInfo + info = newInfo + } + } + return info, nil +} + +func (o *dataPlaneServiceImpl) refreshToken(key string, info *DataPlaneInfo, refresh func(*DataPlaneInfo) (*goauth.Token, error)) (*goauth.Token, error) { + token, tokenOk := o.tokens[key] + if !tokenOk || !token.Valid() { + o.mu.Lock() + defer o.mu.Unlock() + token, tokenOk = o.tokens[key] + if !tokenOk || !token.Valid() { + newToken, err := refresh(info) + if err != nil { + return nil, err + } + o.tokens[key] = newToken + token = newToken + } + } + return token, nil +} + +func (o *dataPlaneServiceImpl) generateKey(method string, params []string) string { + allElements := []string{method} + allElements = append(allElements, params...) + return strings.Join(allElements, "/") +} diff --git a/serving/v2preview/ext_data_plane_test.go b/serving/v2preview/ext_data_plane_test.go new file mode 100644 index 000000000..2caf4b5b6 --- /dev/null +++ b/serving/v2preview/ext_data_plane_test.go @@ -0,0 +1,173 @@ +package servingpreview + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + goauth "golang.org/x/oauth2" +) + +type infoMock struct { + called bool + info *DataPlaneInfo + err error +} + +func (i *infoMock) DataPlaneInfoGetter() (*DataPlaneInfo, error) { + i.called = true + return i.info, i.err +} + +type tokenRefreshSpy struct { + called bool + expectedInfo *DataPlaneInfo + token *goauth.Token + err error +} + +func (t *tokenRefreshSpy) TokenRefresh(info *DataPlaneInfo) (*goauth.Token, error) { + t.expectedInfo = info + t.called = true + return t.token, t.err +} + +func TestTokenNotCached(t *testing.T) { + info := infoMock{ + info: &DataPlaneInfo{ + EndpointUrl: "url", + AuthorizationDetails: "authDetails", + }, + err: nil, + } + s := tokenRefreshSpy{ + token: &goauth.Token{ + AccessToken: "token", + TokenType: "type", + Expiry: time.Now().Add(1 * time.Hour), + }, + err: nil, + } + c := dataPlaneServiceImpl{ + infos: make(map[string]*DataPlaneInfo), + tokens: make(map[string]*goauth.Token), + } + url, token, err := c.GetDataPlaneDetails("method", []string{"params"}, s.TokenRefresh, info.DataPlaneInfoGetter) + assert.NoError(t, err) + assert.Equal(t, "url", url) + assert.Equal(t, "token", token.AccessToken) + assert.Equal(t, "type", token.TokenType) + assert.True(t, token.Valid()) + assert.True(t, info.called) + assert.True(t, s.called) +} + +func TestTokenCached(t *testing.T) { + info := infoMock{ + info: &DataPlaneInfo{ + EndpointUrl: "url", + AuthorizationDetails: "authDetails", + }, + err: nil, + } + s := tokenRefreshSpy{ + token: &goauth.Token{ + AccessToken: "token", + TokenType: "type", + Expiry: time.Now().Add(1 * time.Hour), + }, + err: nil, + } + c := dataPlaneServiceImpl{} + c.infos = 
make(map[string]*DataPlaneInfo) + c.tokens = make(map[string]*goauth.Token) + c.infos["method/params"] = info.info + c.tokens["method/params"] = s.token + url, token, err := c.GetDataPlaneDetails("method", []string{"params"}, s.TokenRefresh, info.DataPlaneInfoGetter) + assert.NoError(t, err) + assert.Equal(t, "url", url) + assert.Equal(t, "token", token.AccessToken) + assert.Equal(t, "type", token.TokenType) + assert.True(t, token.Valid()) + assert.False(t, info.called) + assert.False(t, s.called) +} + +func TestTokenExpired(t *testing.T) { + info := infoMock{ + info: &DataPlaneInfo{ + EndpointUrl: "url", + AuthorizationDetails: "authDetails", + }, + err: nil, + } + + expired := &goauth.Token{ + AccessToken: "oldToken", + TokenType: "type", + Expiry: time.Now().Add(-1 * time.Hour), + } + s := tokenRefreshSpy{ + token: &goauth.Token{ + AccessToken: "token", + TokenType: "type", + Expiry: time.Now().Add(1 * time.Hour), + }, + err: nil, + } + c := dataPlaneServiceImpl{} + c.infos = make(map[string]*DataPlaneInfo) + c.tokens = make(map[string]*goauth.Token) + c.infos["method/params"] = info.info + c.tokens["method/params"] = expired + url, token, err := c.GetDataPlaneDetails("method", []string{"params"}, s.TokenRefresh, info.DataPlaneInfoGetter) + assert.NoError(t, err) + assert.Equal(t, "url", url) + assert.Equal(t, "token", token.AccessToken) + assert.Equal(t, "type", token.TokenType) + assert.True(t, token.Valid()) + assert.False(t, info.called) + assert.True(t, s.called) +} + +func TestTokenInfoError(t *testing.T) { + info := infoMock{ + info: nil, + err: assert.AnError, + } + s := tokenRefreshSpy{} + c := dataPlaneServiceImpl{ + infos: make(map[string]*DataPlaneInfo), + tokens: make(map[string]*goauth.Token), + } + url, token, err := c.GetDataPlaneDetails("method", []string{"params"}, s.TokenRefresh, info.DataPlaneInfoGetter) + assert.ErrorIs(t, err, assert.AnError) + assert.Empty(t, url) + assert.Nil(t, token) + assert.True(t, info.called) + assert.False(t, s.called) +} + +func TestTokenRefreshError(t *testing.T) { + info := infoMock{ + info: &DataPlaneInfo{ + EndpointUrl: "url", + AuthorizationDetails: "authDetails", + }, + err: nil, + } + s := tokenRefreshSpy{ + token: nil, + err: assert.AnError, + } + c := dataPlaneServiceImpl{ + infos: make(map[string]*DataPlaneInfo), + tokens: make(map[string]*goauth.Token), + } + url, token, err := c.GetDataPlaneDetails("method", []string{"params"}, s.TokenRefresh, info.DataPlaneInfoGetter) + assert.ErrorIs(t, err, assert.AnError) + assert.Empty(t, url) + assert.Nil(t, token) + assert.True(t, info.called) + assert.True(t, s.called) +} diff --git a/sql/v2preview/ext_utilities.go b/sql/v2preview/ext_utilities.go new file mode 100644 index 000000000..98aaf0180 --- /dev/null +++ b/sql/v2preview/ext_utilities.go @@ -0,0 +1,59 @@ +package sqlpreview + +import ( + "context" + "fmt" + "time" + + "github.com/databricks/databricks-sdk-go/databricks/retries" +) + +type statementExecutionAPIUtilities interface { + ExecuteAndWait(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) +} + +// [EXPERIMENTAL] Execute a query and wait for results to be available +func (a *StatementExecutionAPI) ExecuteAndWait(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) { + immediateResponse, err := a.ExecuteStatement(ctx, request) + if err != nil { + return nil, err + } + status := immediateResponse.Status + switch status.State { + case StatementStateSucceeded: + return immediateResponse, nil + case 
StatementStateFailed, StatementStateCanceled, StatementStateClosed: + msg := status.State.String() + if status.Error != nil { + msg = fmt.Sprintf("%s: %s %s", msg, status.Error.ErrorCode, status.Error.Message) + } + return nil, fmt.Errorf("%s", msg) + default: + // TODO: parse request.WaitTimeout and use it here + return retries.Poll[StatementResponse](ctx, 20*time.Minute, + func() (*StatementResponse, *retries.Err) { + res, err := a.GetStatementByStatementId(ctx, immediateResponse.StatementId) + if err != nil { + return nil, retries.Halt(err) + } + status := res.Status + switch status.State { + case StatementStateSucceeded: + return &StatementResponse{ + Manifest: res.Manifest, + Result: res.Result, + StatementId: res.StatementId, + Status: res.Status, + }, nil + case StatementStateFailed, StatementStateCanceled, StatementStateClosed: + msg := status.State.String() + if status.Error != nil { + msg = fmt.Sprintf("%s: %s %s", msg, status.Error.ErrorCode, status.Error.Message) + } + return nil, retries.Halt(fmt.Errorf("%s", msg)) + default: + return nil, retries.Continues(status.State.String()) + } + }) + } +} diff --git a/workspace/v2preview/ext_utilities.go b/workspace/v2preview/ext_utilities.go new file mode 100644 index 000000000..463d76c5d --- /dev/null +++ b/workspace/v2preview/ext_utilities.go @@ -0,0 +1,290 @@ +package workspacepreview + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "mime/multipart" + "strings" + + "github.com/databricks/databricks-sdk-go/databricks/apierr" + "github.com/databricks/databricks-sdk-go/databricks/useragent" +) + +var b64 = base64.StdEncoding + +type workspaceAPIUtilities interface { + // Download a notebook or file from the workspace by path. + // + // By default, it acts as if workspace.DownloadFormat(workspace.ExportFormatSource) option is supplied. When using + // workspace.ExportFormatAuto, the `path` is imported or exported as either a workspace file or a notebook, depending + // on an analysis of the `item`’s extension and the file content header provided in the request. + // + // Returns an [io.ReadCloser] with the path contents. + Download(ctx context.Context, path string, opts ...DownloadOption) (io.ReadCloser, error) + + // Upload a workspace object (for example, a notebook or file) or the contents + // of an entire directory (`DBC` format). + // + // Errors: + // + // - RESOURCE_ALREADY_EXISTS: if `path` already exists and no `overwrite=True` is specified. + // - INVALID_PARAMETER_VALUE: if `format` and `content` values are not compatible. + // + // By default, workspace.UploadFormat(workspace.ImportFormatSource). If using + // workspace.UploadFormat(workspace.ImportFormatAuto) the `path` is imported or + // exported as either a workspace file or a notebook, depending on an analysis + // of the `path`’s extension and the header content provided in the request. + // In addition, if the `path` is imported as a notebook, then the `item`’s + // extension is automatically removed. + // + // workspace.UploadLanguage(...) is only required for the source format. + Upload(ctx context.Context, path string, r io.Reader, opts ...UploadOption) error + + // RecursiveList traverses the workspace tree and returns all non-directory + // objects under the path + RecursiveList(ctx context.Context, path string) ([]ObjectInfo, error) + + // WriteFile is identical to [os.WriteFile] but for Workspace File. + // Keep in mind: it uploads a workspace file rather than a notebook, and + // always overwrites it.
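+ // A hedged sketch (assuming a configured *WorkspaceAPI named ws and a + // hypothetical path): + // + //	err := ws.WriteFile(ctx, "/Users/someone@example.com/app.json", []byte(`{"a":1}`))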
+ WriteFile(ctx context.Context, name string, data []byte) error + + // ReadFile is identical to [os.ReadFile] but for workspace files. + ReadFile(ctx context.Context, name string) ([]byte, error) +} + +// PythonNotebookOverwrite crafts a Python notebook import request, +// also trimming the leading whitespace of the code specified in the second argument +func PythonNotebookOverwrite(path, content string) Import { + content = TrimLeadingWhitespace(content) + request, _ := PythonNotebookOverwriteReader(path, + strings.NewReader(content)) + return request +} + +// TrimLeadingWhitespace removes leading whitespace, so that Python code blocks +// that are embedded into Go code could still be interpreted properly. +// TODO: note that this is duplicated from the compute package +func TrimLeadingWhitespace(commandStr string) (newCommand string) { + lines := strings.Split(strings.ReplaceAll(commandStr, "\t", " "), "\n") + leadingWhitespace := 1<<31 - 1 + for _, line := range lines { + for pos, char := range line { + if char == ' ' || char == '\t' { + continue + } + // first non-whitespace character + if pos < leadingWhitespace { + leadingWhitespace = pos + } + // the rest of the line is not needed + break + } + } + for i := 0; i < len(lines); i++ { + if lines[i] == "" || strings.Trim(lines[i], " \t") == "" { + continue + } + if len(lines[i]) < leadingWhitespace { + newCommand += lines[i] + "\n" // or not.. + } else { + newCommand += lines[i][leadingWhitespace:] + "\n" + } + } + return +} + +func PythonNotebookOverwriteReader(path string, r io.Reader) (Import, error) { + raw, err := io.ReadAll(r) + if err != nil { + return Import{}, fmt.Errorf("read: %w", err) + } + return Import{ + Path: path, + Overwrite: true, + Format: ImportFormatSource, + Language: LanguagePython, + Content: b64.EncodeToString(raw), + }, nil +} + +func (r *ExportResponse) Bytes() ([]byte, error) { + return b64.DecodeString(r.Content) +} + +// RecursiveList traverses the workspace tree and returns all non-directory +// objects under the path +func (a *WorkspaceAPI) RecursiveList(ctx context.Context, path string) ([]ObjectInfo, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "recursive-list") + var results []ObjectInfo + queue := []string{path} + for len(queue) > 0 { + path := queue[0] + queue = queue[1:] + batch, err := a.ListAll(ctx, ListWorkspaceRequest{ + Path: path, + }) + if apierr.IsMissing(err) { + // skip on path deleted during iteration + continue + } + if err != nil { + return nil, fmt.Errorf("list %s: %w", path, err) + } + for _, v := range batch { + if v.ObjectType == ObjectTypeDirectory { + queue = append(queue, v.Path) + continue + } + results = append(results, v) + } + } + return results, nil +} + +type UploadOption = func(*Import) + +func UploadOverwrite() UploadOption { + return func(i *Import) { + i.Overwrite = true + } +} + +func UploadLanguage(l Language) UploadOption { + return func(i *Import) { + i.Language = l + } +} + +func UploadFormat(f ImportFormat) UploadOption { + return func(i *Import) { + i.Format = f + } +} + +// Upload a workspace object (for example, a notebook or file) or the contents +// of an entire directory (`DBC` format). +// +// Errors: +// +// - RESOURCE_ALREADY_EXISTS: if `path` already exists and no `overwrite=True` is specified. +// - INVALID_PARAMETER_VALUE: if `format` and `content` values are not compatible. +// +// By default, workspace.UploadFormat(workspace.ImportFormatSource).
+// workspace.UploadFormat(workspace.ImportFormatAuto), the `path` is imported as
+// either a workspace file or a notebook, depending on an analysis
+// of the `path`'s extension and the header content provided in the request.
+// In addition, if the `path` is imported as a notebook, then the `path`'s
+// extension is automatically removed.
+//
+// workspace.UploadLanguage(...) is only required for the source format.
+func (a *WorkspaceAPI) Upload(ctx context.Context, path string, r io.Reader, opts ...UploadOption) error {
+	buf := &bytes.Buffer{}
+	w := multipart.NewWriter(buf)
+	err := w.WriteField("path", path)
+	if err != nil {
+		return fmt.Errorf("write path: %w", err)
+	}
+	content, err := w.CreateFormFile("content", "content")
+	if err != nil {
+		return fmt.Errorf("write content: %w", err)
+	}
+	_, err = io.Copy(content, r)
+	if err != nil {
+		return fmt.Errorf("copy io: %w", err)
+	}
+	i := &Import{}
+	for _, v := range opts {
+		v(i)
+	}
+	if i.Format == "" || i.Format == ImportFormatSource {
+		// infer the notebook language from the file extension
+		for sfx, lang := range map[string]Language{
+			".py":    LanguagePython,
+			".sql":   LanguageSql,
+			".scala": LanguageScala,
+			".R":     LanguageR,
+		} {
+			if !strings.HasSuffix(path, sfx) {
+				continue
+			}
+			i.Language = lang
+		}
+	}
+	if i.Format != "" {
+		err = w.WriteField("format", i.Format.String())
+		if err != nil {
+			return fmt.Errorf("write format: %w", err)
+		}
+	}
+	if i.Language != "" {
+		err = w.WriteField("language", i.Language.String())
+		if err != nil {
+			return fmt.Errorf("write language: %w", err)
+		}
+	}
+	if i.Overwrite {
+		err = w.WriteField("overwrite", "true")
+		if err != nil {
+			return fmt.Errorf("write overwrite: %w", err)
+		}
+	}
+	err = w.Close()
+	if err != nil {
+		return fmt.Errorf("write close: %w", err)
+	}
+	headers := map[string]string{
+		"Content-Type": w.FormDataContentType(),
+	}
+	return a.workspaceImpl.client.Do(ctx, "POST", "/api/2.0/workspace/import", headers, nil, buf.Bytes(), nil)
+}
+
+// WriteFile is identical to [os.WriteFile] but for workspace files.
+// Keep in mind: it uploads a workspace file, not a notebook, and
+// always overwrites the target.
+func (a *WorkspaceAPI) WriteFile(ctx context.Context, name string, data []byte) error {
+	return a.Upload(ctx, name, bytes.NewBuffer(data),
+		UploadFormat(ImportFormatAuto),
+		UploadOverwrite())
+}
+
+type DownloadOption = func(q map[string]any)
+
+func DownloadFormat(f ExportFormat) func(q map[string]any) {
+	return func(q map[string]any) {
+		q["format"] = f.String()
+	}
+}
+
+// Download a notebook or file from the workspace by path.
+//
+// By default, it acts as if the workspace.DownloadFormat(workspace.ExportFormatSource) option is supplied. When using
+// workspace.ExportFormatAuto, the `path` is exported as either a workspace file or a notebook, depending
+// on an analysis of the `path`'s extension and the file content header provided in the request.
+//
+// Returns an [io.ReadCloser] with the path contents.
+func (a *WorkspaceAPI) Download(ctx context.Context, path string, opts ...DownloadOption) (io.ReadCloser, error) {
+	var buf bytes.Buffer
+	query := map[string]any{"path": path, "direct_download": true}
+	for _, v := range opts {
+		v(query)
+	}
+	headers := map[string]string{"Content-Type": "application/json"}
+	err := a.workspaceImpl.client.Do(ctx, "GET", "/api/2.0/workspace/export", headers, nil, query, &buf)
+	if err != nil {
+		return nil, err
+	}
+	return io.NopCloser(&buf), nil
+}
+
+// ReadFile is identical to [os.ReadFile] but for workspace files.
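+//
+// A minimal usage sketch (the workspace path is hypothetical; a is assumed
+// to be a configured *WorkspaceAPI):
+//
+//	data, err := a.ReadFile(ctx, "/Users/someone@example.com/hello.py")
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(string(data))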
+func (a *WorkspaceAPI) ReadFile(ctx context.Context, name string) ([]byte, error) { + b, err := a.Download(ctx, name) + if err != nil { + return nil, err + } + defer b.Close() + return io.ReadAll(b) +} From b19b59e1e77f52860922d5f4604745d5aac3010c Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Feb 2025 12:50:53 +0000 Subject: [PATCH 4/5] fix package --- jobs/v2preview/ext_api_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jobs/v2preview/ext_api_test.go b/jobs/v2preview/ext_api_test.go index c9d3b2921..abd7c2c8d 100644 --- a/jobs/v2preview/ext_api_test.go +++ b/jobs/v2preview/ext_api_test.go @@ -1,4 +1,4 @@ -package jobs +package jobspreview import ( "context" From 2f64ca96c7ffa30a0195187f11b518accc420611 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Feb 2025 13:12:08 +0000 Subject: [PATCH 5/5] delete job ext_utilities --- jobs/v2preview/ext_api.go | 34 --- jobs/v2preview/ext_api_test.go | 371 --------------------------------- 2 files changed, 405 deletions(-) delete mode 100644 jobs/v2preview/ext_api.go delete mode 100644 jobs/v2preview/ext_api_test.go diff --git a/jobs/v2preview/ext_api.go b/jobs/v2preview/ext_api.go deleted file mode 100644 index f2ba5bc99..000000000 --- a/jobs/v2preview/ext_api.go +++ /dev/null @@ -1,34 +0,0 @@ -package jobspreview - -import "context" - -// GetRun retrieves a run based on the provided request. -// It handles pagination if the run contains multiple iterations or tasks. -func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { - run, err := a.jobsImpl.GetRun(ctx, request) - if err != nil { - return nil, err - } - - // When querying a Job run, a page token is returned when there are more than 100 tasks. No iterations are defined for a Job run. Therefore, the next page in the response only includes the next page of tasks. - // When querying a ForEach task run, a page token is returned when there are more than 100 iterations. Only a single task is returned, corresponding to the ForEach task itself. Therefore, the client only reads the iterations from the next page and not the tasks. - isPaginatingIterations := len(run.Iterations) > 0 - - pageToken := run.NextPageToken - for pageToken != "" { - request.PageToken = pageToken - nextRun, err := a.jobsImpl.GetRun(ctx, request) - if err != nil { - return nil, err - } - - if isPaginatingIterations { - run.Iterations = append(run.Iterations, nextRun.Iterations...) - } else { - run.Tasks = append(run.Tasks, nextRun.Tasks...) 
- } - pageToken = nextRun.NextPageToken - } - - return run, nil -} diff --git a/jobs/v2preview/ext_api_test.go b/jobs/v2preview/ext_api_test.go deleted file mode 100644 index abd7c2c8d..000000000 --- a/jobs/v2preview/ext_api_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package jobspreview - -import ( - "context" - "testing" - - "github.com/databricks/databricks-sdk-go/databricks/qa" - - "github.com/stretchr/testify/assert" -) - -func TestGetRun(t *testing.T) { - ctx := context.Background() - - t.Run("run with no pagination", func(t *testing.T) { - var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.2/jobs/runs/get?run_id=514594995218126", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - TaskKey: "task1", - }, - { - RunId: 1234, - TaskKey: "task2", - }, - }, - NextPageToken: "", - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.1/jobs/runs/get?run_id=514594995218126", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - TaskKey: "task1", - }, - { - RunId: 1234, - TaskKey: "task2", - }, - }, - }, - }, - } - client, server := requestMocks.Client(t) - defer server.Close() - - mockJobsImpl := &jobsImpl{ - client: client, - } - api := &JobsAPI{jobsImpl: *mockJobsImpl} - - request := GetRunRequest{RunId: 514594995218126} - run, err := api.GetRun(ctx, request) - - assert.NoError(t, err) - assert.Equal(t, 2, len(run.Tasks)) - assert.Empty(t, run.Iterations) - assert.EqualValues(t, 123, run.Tasks[0].RunId) - assert.EqualValues(t, 1234, run.Tasks[1].RunId) - }) - - t.Run("run with two tasks pages", func(t *testing.T) { - var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - NextPageToken: "token1", - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.1/jobs/runs/get?run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - }, - }, - } - client, server := requestMocks.Client(t) - defer server.Close() - - mockJobsImpl := &jobsImpl{ - client: client, - } - api := &JobsAPI{jobsImpl: *mockJobsImpl} - - request := GetRunRequest{RunId: 111222333} - run, err := api.GetRun(ctx, request) - - assert.NoError(t, err) - assert.Equal(t, 4, len(run.Tasks)) - assert.Empty(t, run.Iterations) - assert.Empty(t, run.NextPageToken) - expected := []RunTask{ - {RunId: 123, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 1234, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 222, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 333, ForceSendFields: []string{"RunId", "TaskKey"}}, - } - assert.Equal(t, expected, run.Tasks) - }) - - t.Run("clusters array is not 
increased when paginated", func(t *testing.T) { - var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - NextPageToken: "token1", - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.1/jobs/runs/get?run_id=111222333", - Response: Run{ - Iterations: []RunTask{}, - Tasks: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - JobClusters: []JobCluster{ - { - JobClusterKey: "cluster1", - }, - { - JobClusterKey: "cluster2", - }, - }, - }, - }, - } - client, server := requestMocks.Client(t) - defer server.Close() - - mockJobsImpl := &jobsImpl{ - client: client, - } - api := &JobsAPI{jobsImpl: *mockJobsImpl} - - request := GetRunRequest{RunId: 111222333} - run, err := api.GetRun(ctx, request) - - assert.NoError(t, err) - assert.Equal(t, 2, len(run.JobClusters)) - assert.Equal(t, "cluster1", run.JobClusters[0].JobClusterKey) - assert.Equal(t, "cluster2", run.JobClusters[1].JobClusterKey) - }) - - t.Run("run with two iterations pages", func(t *testing.T) { - var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?run_id=4444", - Response: Run{ - Iterations: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - }, - Tasks: []RunTask{ - { - RunId: 999, - }, - }, - NextPageToken: "token1", - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.2/jobs/runs/get?page_token=token1&run_id=4444", - Response: Run{ - Iterations: []RunTask{ - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - Tasks: []RunTask{ - { - RunId: 999, - }, - }, - }, - }, - { - Method: "GET", - ReuseRequest: true, - Resource: "/api/2.1/jobs/runs/get?run_id=4444", - Response: Run{ - Iterations: []RunTask{ - { - RunId: 123, - }, - { - RunId: 1234, - }, - { - RunId: 222, - }, - { - RunId: 333, - }, - }, - Tasks: []RunTask{ - { - RunId: 999, - }, - }, - }, - }, - } - client, server := requestMocks.Client(t) - defer server.Close() - - mockJobsImpl := &jobsImpl{ - client: client, - } - api := &JobsAPI{jobsImpl: *mockJobsImpl} - - request := GetRunRequest{RunId: 4444} - run, err := api.GetRun(ctx, request) - - assert.NoError(t, err) - assert.Equal(t, 4, len(run.Iterations)) - assert.Equal(t, 1, len(run.Tasks)) - assert.Empty(t, run.NextPageToken) - expected := []RunTask{ - {RunId: 123, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 1234, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 222, ForceSendFields: []string{"RunId", "TaskKey"}}, - {RunId: 333, ForceSendFields: []string{"RunId", "TaskKey"}}, - } - assert.Equal(t, expected, run.Iterations) - assert.EqualValues(t, 999, run.Tasks[0].RunId) - }) -}
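The deleted tests above exercised the pagination contract that GetRun implements: a job run pages through tasks, while a ForEach task run pages through iterations, both via next_page_token, and side arrays such as JobClusters are taken only from the first page. A minimal caller-side sketch of the same behavior, assuming the equivalent GetRun helper retained in the stable jobs package and a configured *JobsAPI named api (the run ID is hypothetical):

	run, err := api.GetRun(ctx, GetRunRequest{RunId: 111222333}) // hypothetical run ID
	if err != nil {
		// handle the request error
	}
	// run.Tasks (or run.Iterations for a ForEach task run) now contains the
	// entries from every page; pagination has been drained transparently and
	// run.NextPageToken is empty.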