diff --git a/account_client.go b/account_client.go
index 9af1d6a83..388d69d5e 100755
--- a/account_client.go
+++ b/account_client.go
@@ -50,7 +50,7 @@ type AccountClient struct {
 	// workspace storage encryption requires that the workspace is on the E2
 	// version of the platform. If you have an older workspace, it might not be
 	// on the E2 version of the platform. If you are not sure, contact your
-	// Databricks reprsentative.
+	// Databricks representative.
 	EncryptionKeys *deployment.EncryptionKeysAPI
 
 	// Groups simplify identity management, making it easier to assign access to
diff --git a/internal/billing_test.go b/internal/billing_test.go
index 4d377854a..e9c7aca2f 100644
--- a/internal/billing_test.go
+++ b/internal/billing_test.go
@@ -29,8 +29,8 @@ func TestMwsAccLogDelivery(t *testing.T) {
 	}
 	creds, err := a.Credentials.Create(ctx, deployment.CreateCredentialRequest{
 		CredentialsName: RandomName("sdk-"),
-		AwsCredentials: deployment.AwsCredentials{
-			StsRole: &deployment.StsRole{
+		AwsCredentials: deployment.CreateCredentialAwsCredentials{
+			StsRole: &deployment.CreateCredentialStsRole{
 				RoleArn: GetEnvOrSkipTest(t, "TEST_LOGDELIVERY_ARN"),
 			},
 		},
diff --git a/internal/dbsql_test.go b/internal/dbsql_test.go
index 47b154f4e..072523b57 100644
--- a/internal/dbsql_test.go
+++ b/internal/dbsql_test.go
@@ -1,7 +1,6 @@
 package internal
 
 import (
-	"strconv"
 	"testing"
 
 	"github.com/databricks/databricks-sdk-go/service/sql"
@@ -105,37 +104,6 @@ func TestAccAlerts(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, len(all), len(names))
 	assert.Equal(t, alert.Id, names[byId.Name])
-
-	schedule, err := w.Alerts.CreateSchedule(ctx, sql.CreateRefreshSchedule{
-		AlertId:      alert.Id,
-		Cron:         "5 4 * * *",
-		DataSourceId: srcs[0].Id,
-	})
-	require.NoError(t, err)
-	defer w.Alerts.DeleteScheduleByAlertIdAndScheduleId(ctx, alert.Id, schedule.Id)
-
-	schedules, err := w.Alerts.ListSchedulesByAlertId(ctx, alert.Id)
-	require.NoError(t, err)
-	assert.True(t, len(schedules) >= 1)
-
-	me, err := w.CurrentUser.Me(ctx)
-	require.NoError(t, err)
-
-	userId, err := strconv.ParseInt(me.Id, 10, 64)
-	require.NoError(t, err)
-
-	sub, err := w.Alerts.Subscribe(ctx, sql.CreateSubscription{
-		AlertId: alert.Id,
-		UserId:  userId,
-	})
-	require.NoError(t, err)
-
-	allSubs, err := w.Alerts.GetSubscriptionsByAlertId(ctx, alert.Id)
-	require.NoError(t, err)
-	assert.True(t, len(allSubs) >= 1)
-
-	err = w.Alerts.UnsubscribeByAlertIdAndSubscriptionId(ctx, alert.Id, sub.Id)
-	require.NoError(t, err)
 }
 
 func TestAccDashboards(t *testing.T) {
diff --git a/internal/deployment_test.go b/internal/deployment_test.go
index 9ecbca3de..8743168a8 100644
--- a/internal/deployment_test.go
+++ b/internal/deployment_test.go
@@ -75,8 +75,8 @@ func TestMwsAccCredentials(t *testing.T) {
 	}
 	role, err := a.Credentials.Create(ctx, deployment.CreateCredentialRequest{
 		CredentialsName: RandomName("sdk-"),
-		AwsCredentials: deployment.AwsCredentials{
-			StsRole: &deployment.StsRole{
+		AwsCredentials: deployment.CreateCredentialAwsCredentials{
+			StsRole: &deployment.CreateCredentialStsRole{
 				RoleArn: GetEnvOrSkipTest(t, "TEST_CROSSACCOUNT_ARN"),
 			},
 		},
@@ -217,8 +217,8 @@ func TestMwsAccWorkspaces(t *testing.T) {
 	// See https://github.com/databricks/terraform-provider-databricks/issues/1424
 	role, err := a.Credentials.Create(ctx, deployment.CreateCredentialRequest{
 		CredentialsName: RandomName("go-sdk-"),
-		AwsCredentials: deployment.AwsCredentials{
-			StsRole: &deployment.StsRole{
+		AwsCredentials: deployment.CreateCredentialAwsCredentials{
+			StsRole: &deployment.CreateCredentialStsRole{
 				RoleArn: GetEnvOrSkipTest(t, "TEST_CROSSACCOUNT_ARN"),
 			},
 		},
@@ -244,8 +244,8 @@ func TestMwsAccWorkspaces(t *testing.T) {
 	updateRole, err := a.Credentials.Create(ctx, deployment.CreateCredentialRequest{
 		CredentialsName: RandomName("go-sdk-"),
-		AwsCredentials: deployment.AwsCredentials{
-			StsRole: &deployment.StsRole{
+		AwsCredentials: deployment.CreateCredentialAwsCredentials{
+			StsRole: &deployment.CreateCredentialStsRole{
 				RoleArn: GetEnvOrSkipTest(t, "TEST_CROSSACCOUNT_ARN"),
 			},
 		},
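For context on the renamed credential types, here is a minimal sketch of creating an account-level credential configuration outside of a test, assuming an AccountClient configured from the environment and a placeholder role ARN:

	package main

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/deployment"
	)

	func main() {
		ctx := context.Background()
		a, err := databricks.NewAccountClient()
		if err != nil {
			panic(err)
		}
		// AwsCredentials now takes the operation-specific types
		// CreateCredentialAwsCredentials and CreateCredentialStsRole.
		cred, err := a.Credentials.Create(ctx, deployment.CreateCredentialRequest{
			CredentialsName: "sdk-example-credential",
			AwsCredentials: deployment.CreateCredentialAwsCredentials{
				StsRole: &deployment.CreateCredentialStsRole{
					RoleArn: "arn:aws:iam::000000000000:role/cross-account", // placeholder ARN
				},
			},
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(cred.CredentialsId)
	}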
diff --git a/service/clusters/api.go b/service/clusters/api.go
index aab8ecdc1..d4fd2080f 100644
--- a/service/clusters/api.go
+++ b/service/clusters/api.go
@@ -79,8 +79,8 @@ func (a *ClustersAPI) ChangeOwner(ctx context.Context, request ChangeClusterOwne
 // Creates a new Spark cluster. This method will acquire new instances from the
 // cloud provider if necessary. This method is asynchronous; the returned
 // `cluster_id` can be used to poll the cluster status. When this method
-// returns, the cluster will be in\na `PENDING` state. The cluster will be
-// usable once it enters a `RUNNING` state.
+// returns, the cluster will be in a `PENDING` state. The cluster will be usable
+// once it enters a `RUNNING` state.
 //
 // Note: Databricks may not be able to acquire some of the requested nodes, due
 // to cloud provider limitations (account limits, spot price, etc.) or transient
@@ -364,15 +364,14 @@ func (a *ClustersAPI) GetByClusterIdAndWait(ctx context.Context, clusterId strin
 // List all clusters.
 //
-// Returns information about all pinned clusters, currently active clusters, up
-// to 70 of the most recently terminated interactive clusters in the past 7
-// days, and up to 30 of the most recently terminated job clusters in the past 7
-// days.
+// Return information about all pinned clusters, active clusters, up to 200 of
+// the most recently terminated all-purpose clusters in the past 30 days, and up
+// to 30 of the most recently terminated job clusters in the past 30 days.
 //
 // For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
-// interactive clusters in the past 7 days, and 50 terminated job clusters\nin
-// the past 7 days, then this API returns the 1 pinned cluster, 4 active
-// clusters, all 45 terminated interactive clusters, and the 30 most recently
+// all-purpose clusters in the past 30 days, and 50 terminated job clusters in
+// the past 30 days, then this API returns the 1 pinned cluster, 4 active
+// clusters, all 45 terminated all-purpose clusters, and the 30 most recently
 // terminated job clusters.
 //
 // This method is generated by Databricks SDK Code Generator.
@@ -439,15 +438,14 @@ func (a *ClustersAPI) GetByClusterName(ctx context.Context, name string) (*Clust
 // List all clusters.
 //
-// Returns information about all pinned clusters, currently active clusters, up
-// to 70 of the most recently terminated interactive clusters in the past 7
-// days, and up to 30 of the most recently terminated job clusters in the past 7
-// days.
+// Return information about all pinned clusters, active clusters, up to 200 of
+// the most recently terminated all-purpose clusters in the past 30 days, and up
+// to 30 of the most recently terminated job clusters in the past 30 days.
 //
 // For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated
-// interactive clusters in the past 7 days, and 50 terminated job clusters\nin
-// the past 7 days, then this API returns the 1 pinned cluster, 4 active
-// clusters, all 45 terminated interactive clusters, and the 30 most recently
+// all-purpose clusters in the past 30 days, and 50 terminated job clusters in
+// the past 30 days, then this API returns the 1 pinned cluster, 4 active
+// clusters, all 45 terminated all-purpose clusters, and the 30 most recently
 // terminated job clusters.
 func (a *ClustersAPI) ListByCanUseClient(ctx context.Context, canUseClient string) (*ListClustersResponse, error) {
 	return a.impl.List(ctx, List{
diff --git a/service/clusters/interface.go b/service/clusters/interface.go
index 79d0073fa..ddc51b3ac 100755
--- a/service/clusters/interface.go
+++ b/service/clusters/interface.go
@@ -46,7 +46,7 @@ type ClustersService interface {
 	// Creates a new Spark cluster. This method will acquire new instances from
 	// the cloud provider if necessary. This method is asynchronous; the
 	// returned `cluster_id` can be used to poll the cluster status. When this
-	// method returns, the cluster will be in\na `PENDING` state. The cluster
+	// method returns, the cluster will be in a `PENDING` state. The cluster
 	// will be usable once it enters a `RUNNING` state.
 	//
 	// Note: Databricks may not be able to acquire some of the requested nodes,
@@ -101,16 +101,16 @@ type ClustersService interface {
 	// List all clusters.
 	//
-	// Returns information about all pinned clusters, currently active clusters,
-	// up to 70 of the most recently terminated interactive clusters in the past
-	// 7 days, and up to 30 of the most recently terminated job clusters in the
-	// past 7 days.
+	// Return information about all pinned clusters, active clusters, up to 200
+	// of the most recently terminated all-purpose clusters in the past 30 days,
+	// and up to 30 of the most recently terminated job clusters in the past 30
+	// days.
 	//
 	// For example, if there is 1 pinned cluster, 4 active clusters, 45
-	// terminated interactive clusters in the past 7 days, and 50 terminated job
-	// clusters\nin the past 7 days, then this API returns the 1 pinned cluster,
-	// 4 active clusters, all 45 terminated interactive clusters, and the 30
-	// most recently terminated job clusters.
+	// terminated all-purpose clusters in the past 30 days, and 50 terminated
+	// job clusters in the past 30 days, then this API returns the 1 pinned
+	// cluster, 4 active clusters, all 45 terminated all-purpose clusters, and
+	// the 30 most recently terminated job clusters.
 	//
 	// Use ListAll() to get all ClusterInfo instances
 	List(ctx context.Context, request List) (*ListClustersResponse, error)
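To make the revised List semantics concrete, a short sketch that fetches every cluster visible to the caller through ListAll, assuming a WorkspaceClient configured from the environment:

	package main

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/clusters"
	)

	func main() {
		ctx := context.Background()
		w, err := databricks.NewWorkspaceClient()
		if err != nil {
			panic(err)
		}
		// Returns pinned and active clusters plus the recently terminated
		// all-purpose and job clusters described in the doc comment above.
		all, err := w.Clusters.ListAll(ctx, clusters.List{})
		if err != nil {
			panic(err)
		}
		for _, c := range all {
			fmt.Printf("%s (%s)\n", c.ClusterName, c.State)
		}
	}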
diff --git a/service/dbfs/api.go b/service/dbfs/api.go
index 52c5a9c81..6812fc0e7 100755
--- a/service/dbfs/api.go
+++ b/service/dbfs/api.go
@@ -226,7 +226,7 @@ func (a *DbfsAPI) Put(ctx context.Context, request Put) error {
 // an exception with `RESOURCE_DOES_NOT_EXIST`. If the path is a directory, the
 // read length is negative, or if the offset is negative, this call throws an
 // exception with `INVALID_PARAMETER_VALUE`. If the read length exceeds 1 MB,
-// this call throws an\nexception with `MAX_READ_SIZE_EXCEEDED`.
+// this call throws an exception with `MAX_READ_SIZE_EXCEEDED`.
 //
 // If `offset + length` exceeds the number of bytes in a file, it reads the
 // contents until the end of file.",
diff --git a/service/dbfs/interface.go b/service/dbfs/interface.go
index cf665d22d..716a2d328 100755
--- a/service/dbfs/interface.go
+++ b/service/dbfs/interface.go
@@ -128,7 +128,7 @@ type DbfsService interface {
 	// throws an exception with `RESOURCE_DOES_NOT_EXIST`. If the path is a
 	// directory, the read length is negative, or if the offset is negative,
 	// this call throws an exception with `INVALID_PARAMETER_VALUE`. If the read
-	// length exceeds 1 MB, this call throws an\nexception with
+	// length exceeds 1 MB, this call throws an exception with
 	// `MAX_READ_SIZE_EXCEEDED`.
 	//
 	// If `offset + length` exceeds the number of bytes in a file, it reads the
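The read semantics above translate to a ranged read like the following sketch; the request type name (Read) and the base64-encoded Data field on the response are assumptions about the generated model, not confirmed by this diff:

	package main

	import (
		"context"
		"encoding/base64"
		"fmt"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/dbfs"
	)

	func main() {
		ctx := context.Background()
		w, err := databricks.NewWorkspaceClient()
		if err != nil {
			panic(err)
		}
		// Request at most 1 MB per call; larger lengths fail with
		// MAX_READ_SIZE_EXCEEDED, and reads past EOF return fewer bytes.
		resp, err := w.Dbfs.Read(ctx, dbfs.Read{
			Path:   "/tmp/example.txt", // placeholder path
			Offset: 0,
			Length: 1024 * 1024,
		})
		if err != nil {
			panic(err)
		}
		data, err := base64.StdEncoding.DecodeString(resp.Data)
		if err != nil {
			panic(err)
		}
		fmt.Printf("read %d bytes\n", len(data))
	}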
diff --git a/service/deployment/api.go b/service/deployment/api.go
index 81504777c..8ba93d6e7 100755
--- a/service/deployment/api.go
+++ b/service/deployment/api.go
@@ -186,7 +186,7 @@ func NewEncryptionKeys(client *client.DatabricksClient) *EncryptionKeysAPI {
 // version of the platform. Updating a running workspace with workspace storage
 // encryption requires that the workspace is on the E2 version of the platform.
 // If you have an older workspace, it might not be on the E2 version of the
-// platform. If you are not sure, contact your Databricks reprsentative.
+// platform. If you are not sure, contact your Databricks representative.
 type EncryptionKeysAPI struct {
 	// impl contains low-level REST API interface, that could be overridden
 	// through WithImpl(EncryptionKeysService)
@@ -557,11 +557,6 @@ func (a *PrivateAccessAPI) Impl() PrivateAccessService {
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) {
@@ -576,11 +571,6 @@ func (a *PrivateAccessAPI) Create(ctx context.Context, request UpsertPrivateAcce
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAccesRequest) error {
@@ -595,11 +585,6 @@ func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAcce
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) error {
@@ -616,11 +601,6 @@ func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context,
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) {
@@ -635,11 +615,6 @@ func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesReque
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error) {
@@ -652,11 +627,6 @@ func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, pri
 //
 // Gets a list of all private access settings objects for an account, specified
 // by ID.
-//
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for AWS PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
 func (a *PrivateAccessAPI) List(ctx context.Context) ([]PrivateAccessSettings, error) {
 	return a.impl.List(ctx)
 }
@@ -726,6 +696,7 @@ func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, n
 // access settings are affected by any change. If `public_access_enabled`,
 // `private_access_level`, or `allowed_vpc_endpoint_ids` are updated, effects of
 // these changes might take several minutes to propagate to the workspace API.
+//
 // You can share one private access settings object with multiple workspaces in
 // a single account. However, private access settings are specific to AWS
 // regions, so only workspaces in the same AWS region can use a given private
@@ -734,11 +705,6 @@ func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, n
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *PrivateAccessAPI) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error {
@@ -930,28 +896,12 @@ func (a *VpcEndpointsAPI) Impl() VpcEndpointsService {
 // object in AWS used to communicate privately with Databricks over [AWS
 // PrivateLink].
 //
-// **Important**: When you register a VPC endpoint to the Databricks workspace
-// VPC endpoint service for any workspace, **in this release Databricks enables
-// front-end (web application and REST API) access from the source network of
-// the VPC endpoint to all workspaces in that AWS region in your Databricks
-// account if the workspaces have any PrivateLink connections in their workspace
-// configuration**. If you have questions about this behavior, contact your
-// Databricks representative.
-//
-// Within AWS, your VPC endpoint stays in `pendingAcceptance` state until you
-// register it in a VPC endpoint configuration through the Account API. After
-// you register the VPC endpoint configuration, the Databricks [endpoint
-// service] automatically accepts the VPC endpoint and it eventually transitions
-// to the `available` state.
+// After you create the VPC endpoint configuration, the Databricks [endpoint
+// service] automatically accepts the VPC endpoint.
 //
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
@@ -972,11 +922,6 @@ func (a *VpcEndpointsAPI) Create(ctx context.Context, request CreateVpcEndpointR
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
@@ -996,11 +941,6 @@ func (a *VpcEndpointsAPI) Delete(ctx context.Context, request DeleteVpcEndpointR
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
@@ -1015,11 +955,6 @@ func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpoint
 // Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
 // in AWS used to communicate privately with Databricks over [AWS PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) {
@@ -1031,11 +966,6 @@ func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest
 // Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
 // in AWS used to communicate privately with Databricks over [AWS PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [AWS PrivateLink]: https://aws.amazon.com/privatelink
 // [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error) {
@@ -1051,11 +981,6 @@ func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId
 // Before configuring PrivateLink, read the [Databricks article about
 // PrivateLink].
 //
-// This operation is available only if your account is on the E2 version of the
-// platform and your Databricks account is enabled for PrivateLink (Public
-// Preview). Contact your Databricks representative to enable your account for
-// PrivateLink.
-//
 // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 func (a *VpcEndpointsAPI) List(ctx context.Context) ([]VpcEndpoint, error) {
 	return a.impl.List(ctx)
@@ -1352,15 +1277,25 @@ func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*W
 // configuration for failed workspace deployment for some fields, but not all
 // fields. For a failed workspace, this request supports updates to the
 // following fields only: - Credential configuration ID - Storage configuration
-// ID - Network configuration ID. Used only if you use customer-managed VPC. -
-// Key configuration ID for managed services (control plane storage, such as
+// ID - Network configuration ID. Used only to add or change a network
+// configuration for a customer-managed VPC. For a failed workspace only, you
+// can convert a workspace with Databricks-managed VPC to use a customer-managed
+// VPC by adding this ID. You cannot downgrade a workspace with a
+// customer-managed VPC to be a Databricks-managed VPC. You can update the
+// network configuration for a failed or running workspace to add PrivateLink
+// support, though you must also add a private access settings object. - Key
+// configuration ID for managed services (control plane storage, such as
 // notebook source and Databricks SQL queries). Used only if you use
 // customer-managed keys for managed services. - Key configuration ID for
 // workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if
 // you use customer-managed keys for workspace storage. **Important**: If the
 // workspace was ever in the running state, even if briefly before becoming a
 // failed workspace, you cannot add a new key configuration ID for workspace
-// storage.
+// storage. - Private access settings ID to add PrivateLink support. You can add
+// or update the private access settings ID to upgrade a workspace to add
+// support for front-end, back-end, or both types of connectivity. You cannot
+// remove (downgrade) any existing front-end or back-end PrivateLink support on
+// a workspace.
 //
 // After calling the `PATCH` operation to update the workspace configuration,
 // make repeated `GET` requests with the workspace ID and check the workspace
@@ -1374,13 +1309,11 @@ func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*W
 // a running workspace, this request supports updating the following fields
 // only: - Credential configuration ID
 //
-// - Network configuration ID. Used only if you already use use customer-managed
-// VPC. This change is supported only if you specified a network configuration
-// ID in your original workspace creation. In other words, you cannot switch
-// from a Databricks-managed VPC to a customer-managed VPC. **Note**: You cannot
-// use a network configuration update in this API to add support for PrivateLink
-// (in Public Preview). To add PrivateLink to an existing workspace, contact
-// your Databricks representative.
+// - Network configuration ID. Used only if you already use a customer-managed
+// VPC. You cannot convert a running workspace from a Databricks-managed VPC to
+// a customer-managed VPC. You can use a network configuration update in this
+// API for a failed or running workspace to add support for PrivateLink,
+// although you also need to add a private access settings object.
 //
 // - Key configuration ID for managed services (control plane storage, such as
 // notebook source and Databricks SQL queries). Databricks does not directly
@@ -1396,17 +1329,18 @@ func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*W
 // re-encrypted with the DMK and the new CMK. - Key configuration ID for
 // workspace storage (root S3 bucket and, optionally, EBS volumes). You can set
 // this only if the workspace does not already have a customer-managed key
-// configuration for workspace storage.
-//
-// **Important**: For updating running workspaces, this API is unavailable on
-// Mondays, Tuesdays, and Thursdays from 4:30pm-7:30pm PST due to routine
-// maintenance. Plan your workspace updates accordingly. For questions about
-// this schedule, contact your Databricks representative.
+// configuration for workspace storage. - Private access settings ID to add
+// PrivateLink support. You can add or update the private access settings ID to
+// upgrade a workspace to add support for front-end, back-end, or both types of
+// connectivity. You cannot remove (downgrade) any existing front-end or
+// back-end PrivateLink support on a workspace.
 //
 // **Important**: To update a running workspace, your workspace must have no
-// running cluster instances, which includes all-purpose clusters, job clusters,
-// and pools that might have running clusters. Terminate all cluster instances
-// in the workspace before calling this API.
+// running compute resources that run in your workspace's VPC in the Classic
+// data plane. For example, stop all all-purpose clusters, job clusters, pools
+// with running clusters, and Classic SQL warehouses. If you do not terminate
+// all cluster instances in the workspace before calling this API, the request
+// will fail.
 //
 // ### Wait until changes take effect. After calling the `PATCH` operation to
 // update the workspace configuration, make repeated `GET` requests with the
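A sketch of the PATCH-then-poll flow the comment describes, for a running workspace that changes its network configuration; the workspace ID and status values are placeholders, and the WorkspaceId field and WorkspaceStatus comparison are assumptions about the generated model:

	package main

	import (
		"context"
		"fmt"
		"time"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/deployment"
	)

	func main() {
		ctx := context.Background()
		a, err := databricks.NewAccountClient()
		if err != nil {
			panic(err)
		}
		workspaceId := int64(123456789) // placeholder
		// PATCH the workspace; only the fields documented above may change.
		err = a.Workspaces.Update(ctx, deployment.UpdateWorkspaceRequest{
			WorkspaceId: workspaceId,
			NetworkId:   "nw-placeholder-id",
		})
		if err != nil {
			panic(err)
		}
		// Poll with GET until the update takes effect, per the doc comment.
		for {
			ws, err := a.Workspaces.Get(ctx, deployment.GetWorkspaceRequest{WorkspaceId: workspaceId})
			if err != nil {
				panic(err)
			}
			if ws.WorkspaceStatus != "UPDATING" { // status name is an assumption
				fmt.Println("workspace status:", ws.WorkspaceStatus)
				break
			}
			time.Sleep(10 * time.Second)
		}
	}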
diff --git a/service/deployment/interface.go b/service/deployment/interface.go
index 389c277b2..26125131e 100755
--- a/service/deployment/interface.go
+++ b/service/deployment/interface.go
@@ -66,7 +66,7 @@ type CredentialsService interface {
 // version of the platform. Updating a running workspace with workspace storage
 // encryption requires that the workspace is on the E2 version of the platform.
 // If you have an older workspace, it might not be on the E2 version of the
-// platform. If you are not sure, contact your Databricks reprsentative.
+// platform. If you are not sure, contact your Databricks representative.
 type EncryptionKeysService interface {
 
 	// Create encryption key configuration.
@@ -198,11 +198,6 @@ type PrivateAccessService interface {
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
@@ -215,11 +210,6 @@ type PrivateAccessService interface {
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	Delete(ctx context.Context, request DeletePrivateAccesRequest) error
@@ -232,11 +222,6 @@ type PrivateAccessService interface {
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)
@@ -245,11 +230,6 @@ type PrivateAccessService interface {
 	//
 	// Gets a list of all private access settings objects for an account,
 	// specified by ID.
-	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for AWS PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
	List(ctx context.Context) ([]PrivateAccessSettings, error)
 
 	// Replace private access settings.
@@ -265,20 +245,16 @@ type PrivateAccessService interface {
 	// the private access settings are affected by any change. If
 	// `public_access_enabled`, `private_access_level`, or
 	// `allowed_vpc_endpoint_ids` are updated, effects of these changes might
-	// take several minutes to propagate to the workspace API. You can share one
-	// private access settings object with multiple workspaces in a single
-	// account. However, private access settings are specific to AWS regions, so
-	// only workspaces in the same AWS region can use a given private access
-	// settings object.
+	// take several minutes to propagate to the workspace API.
+	//
+	// You can share one private access settings object with multiple workspaces
+	// in a single account. However, private access settings are specific to AWS
+	// regions, so only workspaces in the same AWS region can use a given
+	// private access settings object.
 	//
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error
@@ -343,28 +319,12 @@ type VpcEndpointsService interface {
 	// object in AWS used to communicate privately with Databricks over [AWS
 	// PrivateLink].
 	//
-	// **Important**: When you register a VPC endpoint to the Databricks
-	// workspace VPC endpoint service for any workspace, **in this release
-	// Databricks enables front-end (web application and REST API) access from
-	// the source network of the VPC endpoint to all workspaces in that AWS
-	// region in your Databricks account if the workspaces have any PrivateLink
-	// connections in their workspace configuration**. If you have questions
-	// about this behavior, contact your Databricks representative.
-	//
-	// Within AWS, your VPC endpoint stays in `pendingAcceptance` state until
-	// you register it in a VPC endpoint configuration through the Account API.
-	// After you register the VPC endpoint configuration, the Databricks
-	// [endpoint service] automatically accepts the VPC endpoint and it
-	// eventually transitions to the `available` state.
+	// After you create the VPC endpoint configuration, the Databricks [endpoint
+	// service] automatically accepts the VPC endpoint.
 	//
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
@@ -384,11 +344,6 @@ type VpcEndpointsService interface {
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [AWS VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
@@ -400,11 +355,6 @@ type VpcEndpointsService interface {
 	// object in AWS used to communicate privately with Databricks over [AWS
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
 	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
 	Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)
@@ -416,11 +366,6 @@ type VpcEndpointsService interface {
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
 	//
-	// This operation is available only if your account is on the E2 version of
-	// the platform and your Databricks account is enabled for PrivateLink
-	// (Public Preview). Contact your Databricks representative to enable your
-	// account for PrivateLink.
-	//
 	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
 	List(ctx context.Context) ([]VpcEndpoint, error)
 }
@@ -501,15 +446,25 @@ type WorkspacesService interface {
 	// configuration for failed workspace deployment for some fields, but not
 	// all fields. For a failed workspace, this request supports updates to the
 	// following fields only: - Credential configuration ID - Storage
-	// configuration ID - Network configuration ID. Used only if you use
-	// customer-managed VPC. - Key configuration ID for managed services
-	// (control plane storage, such as notebook source and Databricks SQL
-	// queries). Used only if you use customer-managed keys for managed
-	// services. - Key configuration ID for workspace storage (root S3 bucket
-	// and, optionally, EBS volumes). Used only if you use customer-managed keys
-	// for workspace storage. **Important**: If the workspace was ever in the
-	// running state, even if briefly before becoming a failed workspace, you
-	// cannot add a new key configuration ID for workspace storage.
+	// configuration ID - Network configuration ID. Used only to add or change a
+	// network configuration for a customer-managed VPC. For a failed workspace
+	// only, you can convert a workspace with Databricks-managed VPC to use a
+	// customer-managed VPC by adding this ID. You cannot downgrade a workspace
+	// with a customer-managed VPC to be a Databricks-managed VPC. You can
+	// update the network configuration for a failed or running workspace to add
+	// PrivateLink support, though you must also add a private access settings
+	// object. - Key configuration ID for managed services (control plane
+	// storage, such as notebook source and Databricks SQL queries). Used only
+	// if you use customer-managed keys for managed services. - Key
+	// configuration ID for workspace storage (root S3 bucket and, optionally,
+	// EBS volumes). Used only if you use customer-managed keys for workspace
+	// storage. **Important**: If the workspace was ever in the running state,
+	// even if briefly before becoming a failed workspace, you cannot add a new
+	// key configuration ID for workspace storage. - Private access settings ID
+	// to add PrivateLink support. You can add or update the private access
+	// settings ID to upgrade a workspace to add support for front-end,
+	// back-end, or both types of connectivity. You cannot remove (downgrade)
+	// any existing front-end or back-end PrivateLink support on a workspace.
 	//
 	// After calling the `PATCH` operation to update the workspace
 	// configuration, make repeated `GET` requests with the workspace ID and
@@ -525,14 +480,12 @@ type WorkspacesService interface {
 	// For a running workspace, this request supports updating the following
 	// fields only: - Credential configuration ID
 	//
-	// - Network configuration ID. Used only if you already use use
-	// customer-managed VPC. This change is supported only if you specified a
-	// network configuration ID in your original workspace creation. In other
-	// words, you cannot switch from a Databricks-managed VPC to a
-	// customer-managed VPC. **Note**: You cannot use a network configuration
-	// update in this API to add support for PrivateLink (in Public Preview). To
-	// add PrivateLink to an existing workspace, contact your Databricks
-	// representative.
+	// - Network configuration ID. Used only if you already use a
+	// customer-managed VPC. You cannot convert a running workspace from a
+	// Databricks-managed VPC to a customer-managed VPC. You can use a network
+	// configuration update in this API for a failed or running workspace to add
+	// support for PrivateLink, although you also need to add a private access
+	// settings object.
 	//
 	// - Key configuration ID for managed services (control plane storage, such
 	// as notebook source and Databricks SQL queries). Databricks does not
@@ -549,17 +502,18 @@ type WorkspacesService interface {
 	// new CMK. - Key configuration ID for workspace storage (root S3 bucket
 	// and, optionally, EBS volumes). You can set this only if the workspace
 	// does not already have a customer-managed key configuration for workspace
-	// storage.
-	//
-	// **Important**: For updating running workspaces, this API is unavailable
-	// on Mondays, Tuesdays, and Thursdays from 4:30pm-7:30pm PST due to routine
-	// maintenance. Plan your workspace updates accordingly. For questions about
-	// this schedule, contact your Databricks representative.
+	// storage. - Private access settings ID to add PrivateLink support. You can
+	// add or update the private access settings ID to upgrade a workspace to
+	// add support for front-end, back-end, or both types of connectivity. You
+	// cannot remove (downgrade) any existing front-end or back-end PrivateLink
+	// support on a workspace.
 	//
 	// **Important**: To update a running workspace, your workspace must have no
-	// running cluster instances, which includes all-purpose clusters, job
-	// clusters, and pools that might have running clusters. Terminate all
-	// cluster instances in the workspace before calling this API.
+	// running compute resources that run in your workspace's VPC in the Classic
+	// data plane. For example, stop all all-purpose clusters, job clusters,
+	// pools with running clusters, and Classic SQL warehouses. If you do not
+	// terminate all cluster instances in the workspace before calling this API,
+	// the request will fail.
 	//
 	// ### Wait until changes take effect. After calling the `PATCH` operation
 	// to update the workspace configuration, make repeated `GET` requests with
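As a usage sketch for the VpcEndpointsService above, registering an existing AWS VPC endpoint so the Databricks endpoint service can accept it; the field names on CreateVpcEndpointRequest are assumptions about the generated model:

	package example

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/deployment"
	)

	func registerVpcEndpoint(ctx context.Context) error {
		a, err := databricks.NewAccountClient()
		if err != nil {
			return err
		}
		// Per the doc comment above, the Databricks endpoint service accepts
		// the endpoint automatically once the configuration is created.
		vpce, err := a.VpcEndpoints.Create(ctx, deployment.CreateVpcEndpointRequest{
			VpcEndpointName:  "sdk-example-endpoint",
			AwsVpcEndpointId: "vpce-0123456789abcdef0", // placeholder
			Region:           "us-west-2",              // placeholder
		})
		if err != nil {
			return err
		}
		fmt.Println(vpce.VpcEndpointId)
		return nil
	}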
diff --git a/service/deployment/model.go b/service/deployment/model.go
index b66dd4aa0..71b07a145 100644
--- a/service/deployment/model.go
+++ b/service/deployment/model.go
@@ -43,12 +43,21 @@ type CreateAwsKeyInfo struct {
 	ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"`
 }
 
+type CreateCredentialAwsCredentials struct {
+	StsRole *CreateCredentialStsRole `json:"sts_role,omitempty"`
+}
+
 type CreateCredentialRequest struct {
-	AwsCredentials AwsCredentials `json:"aws_credentials"`
+	AwsCredentials CreateCredentialAwsCredentials `json:"aws_credentials"`
 	// The human-readable name of the credential configuration object.
 	CredentialsName string `json:"credentials_name"`
 }
 
+type CreateCredentialStsRole struct {
+	// The Amazon Resource Name (ARN) of the cross account role.
+	RoleArn string `json:"role_arn,omitempty"`
+}
+
 type CreateCustomerManagedKeyRequest struct {
 	AwsKeyInfo CreateAwsKeyInfo `json:"aws_key_info"`
 	// The cases that the key can be used for.
@@ -155,10 +164,10 @@ type CreateWorkspaceRequest struct {
 	// [AWS Pricing]: https://databricks.com/product/aws-pricing
 	PricingTier PricingTier `json:"pricing_tier,omitempty"`
 	// ID of the workspace's private access settings object. Only used for
-	// PrivateLink (Public Preview). This ID must be specified for customers
-	// using [AWS PrivateLink] for either front-end (user-to-workspace
-	// connection), back-end (data plane to control plane connection), or both
-	// connection types.
+	// PrivateLink. This ID must be specified for customers using [AWS
+	// PrivateLink] for either front-end (user-to-workspace connection),
+	// back-end (data plane to control plane connection), or both connection
+	// types.
 	//
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
@@ -429,10 +438,10 @@ type GetWorkspaceRequest struct {
 // The configurations for the GKE cluster of a Databricks workspace.
 type GkeConfig struct {
 	// Specifies the network connectivity types for the GKE nodes and the GKE
-	// master network. \n
+	// master network.
 	//
 	// Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the
-	// workspace. The GKE nodes will not have public IPs.\n
+	// workspace. The GKE nodes will not have public IPs.
 	//
 	// Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of
 	// a public GKE cluster have public IP addresses.
@@ -445,10 +454,10 @@ type GkeConfig struct {
 }
 
 // Specifies the network connectivity types for the GKE nodes and the GKE master
-// network. \n
+// network.
 //
 // Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the
-// workspace. The GKE nodes will not have public IPs.\n
+// workspace. The GKE nodes will not have public IPs.
 //
 // Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a
 // public GKE cluster have public IP addresses.
@@ -632,7 +641,6 @@ func (pt *PricingTier) Type() string {
 
 // The private access level controls which VPC endpoints can connect to the UI
 // or API of any workspace that attaches this private access settings object. *
-// `ANY` (deprecated): Any VPC endpoint can connect to your workspace. *
 // `ACCOUNT` level access (the default) allows only VPC endpoints that are
 // registered in your Databricks account connect to your workspace. * `ENDPOINT`
 // level access allows only specified VPC endpoints connect to your workspace.
@@ -641,8 +649,6 @@ type PrivateAccessLevel string
 
 const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT`
 
-const PrivateAccessLevelAny PrivateAccessLevel = `ANY`
-
 const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT`
 
 // String representation for [fmt.Print]
@@ -653,11 +659,11 @@ func (pal *PrivateAccessLevel) String() string {
 // Set raw string value and validate it against allowed values
 func (pal *PrivateAccessLevel) Set(v string) error {
 	switch v {
-	case `ACCOUNT`, `ANY`, `ENDPOINT`:
+	case `ACCOUNT`, `ENDPOINT`:
 		*pal = PrivateAccessLevel(v)
 		return nil
 	default:
-		return fmt.Errorf(`value "%s" is not one of "ACCOUNT", "ANY", "ENDPOINT"`, v)
+		return fmt.Errorf(`value "%s" is not one of "ACCOUNT", "ENDPOINT"`, v)
 	}
 }
 
@@ -686,11 +692,10 @@ type PrivateAccessSettings struct {
 	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
 	// The private access level controls which VPC endpoints can connect to the
 	// UI or API of any workspace that attaches this private access settings
-	// object. * `ANY` (deprecated): Any VPC endpoint can connect to your
-	// workspace. * `ACCOUNT` level access (the default) allows only VPC
-	// endpoints that are registered in your Databricks account connect to your
-	// workspace. * `ENDPOINT` level access allows only specified VPC endpoints
-	// connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.
+	// object. * `ACCOUNT` level access (the default) allows only VPC endpoints
+	// that are registered in your Databricks account connect to your workspace.
+	// * `ENDPOINT` level access allows only specified VPC endpoints connect to
+	// your workspace. For details, see `allowed_vpc_endpoint_ids`.
 	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
 	// Databricks private access settings ID.
 	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
@@ -745,14 +750,9 @@ type UpdateWorkspaceRequest struct {
 	// object. This parameter is available only for updating failed workspaces.
 	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
 	// The ID of the workspace's network configuration object. Used only if you
-	// already use a customer-managed VPC. This change is supported only if you
-	// specified a network configuration ID when the workspace was created. In
-	// other words, you cannot switch from a Databricks-managed VPC to a
-	// customer-managed VPC. This parameter is available for updating both
-	// failed and running workspaces. **Note**: You cannot use a network
-	// configuration update in this API to add support for PrivateLink (Public
-	// Preview). To add PrivateLink to an existing workspace, contact your
-	// Databricks representative.
+	// already use a customer-managed VPC. For failed workspaces only, you can
+	// switch from a Databricks-managed VPC to a customer-managed VPC by
+	// updating the workspace to add a network configuration ID.
 	NetworkId string `json:"network_id,omitempty"`
 	// The ID of the workspace's storage configuration object. This parameter is
 	// available only for updating failed workspaces.
@@ -782,11 +782,10 @@ type UpsertPrivateAccessSettingsRequest struct {
 	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
 	// The private access level controls which VPC endpoints can connect to the
 	// UI or API of any workspace that attaches this private access settings
-	// object. * `ANY` (deprecated): Any VPC endpoint can connect to your
-	// workspace. * `ACCOUNT` level access (the default) allows only VPC
-	// endpoints that are registered in your Databricks account connect to your
-	// workspace. * `ENDPOINT` level access allows only specified VPC endpoints
-	// connect to your workspace. For details, see `allowed_vpc_endpoint_ids`.
+	// object. * `ACCOUNT` level access (the default) allows only VPC endpoints
+	// that are registered in your Databricks account connect to your workspace.
+	// * `ENDPOINT` level access allows only specified VPC endpoints connect to
+	// your workspace. For details, see `allowed_vpc_endpoint_ids`.
 	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
 	// Databricks Account API private access settings ID.
 	PrivateAccessSettingsId string `json:"-" url:"-"`
@@ -975,10 +974,9 @@ type Workspace struct {
 	// [AWS Pricing]: https://databricks.com/product/aws-pricing
 	PricingTier PricingTier `json:"pricing_tier,omitempty"`
 	// ID of the workspace's private access settings object. Only used for
-	// PrivateLink (Public Preview). You must specify this ID if you are using
-	// [AWS PrivateLink] for either front-end (user-to-workspace connection),
-	// back-end (data plane to control plane connection), or both connection
-	// types.
+	// PrivateLink. You must specify this ID if you are using [AWS PrivateLink]
+	// for either front-end (user-to-workspace connection), back-end (data plane
+	// to control plane connection), or both connection types.
 	//
 	// Before configuring PrivateLink, read the [Databricks article about
 	// PrivateLink].
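With the ANY level removed, a private access settings object is created at ACCOUNT or ENDPOINT level. A sketch using the remaining enum values, where the name and region field names are assumptions:

	package example

	import (
		"context"

		"github.com/databricks/databricks-sdk-go"
		"github.com/databricks/databricks-sdk-go/service/deployment"
	)

	func createPrivateAccessSettings(ctx context.Context, vpceIds []string) (*deployment.PrivateAccessSettings, error) {
		a, err := databricks.NewAccountClient()
		if err != nil {
			return nil, err
		}
		return a.PrivateAccess.Create(ctx, deployment.UpsertPrivateAccessSettingsRequest{
			PrivateAccessSettingsName: "sdk-example-pas", // field name is an assumption
			Region:                    "us-west-2",       // field name is an assumption
			// ENDPOINT level: only the listed VPC endpoints may connect.
			PrivateAccessLevel:    deployment.PrivateAccessLevelEndpoint,
			AllowedVpcEndpointIds: vpceIds,
		})
	}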
diff --git a/service/endpoints/api.go b/service/endpoints/api.go
index 1e25669e4..f6cfb4151 100755
--- a/service/endpoints/api.go
+++ b/service/endpoints/api.go
@@ -1,6 +1,6 @@
-// The Serverless Real-Time Inference Serving Endpoints API allows you to create, update, and delete model serving endpoints.
+// The Serving Endpoints API allows you to create, update, and delete model serving endpoints.
 package endpoints
 
 import (
@@ -21,8 +21,8 @@ func NewServingEndpoints(client *client.DatabricksClient) *ServingEndpointsAPI {
 	}
 }
 
-// The Serverless Real-Time Inference Serving Endpoints API allows you to
-// create, update, and delete model serving endpoints.
+// The Serving Endpoints API allows you to create, update, and delete model
+// serving endpoints.
 //
 // You can use a serving endpoint to serve models from the Databricks Model
 // Registry. Endpoints expose the underlying models as scalable REST API
@@ -55,8 +55,7 @@ func (a *ServingEndpointsAPI) Impl() ServingEndpointsService {
 // Retrieve the logs associated with building the model's environment for a
 // given serving endpoint's served model.
 //
-// Retrieves the build logs associated with the provided served model. Please
-// note that this API is in preview and may change in the future.
+// Retrieves the build logs associated with the provided served model.
 func (a *ServingEndpointsAPI) BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error) {
 	return a.impl.BuildLogs(ctx, request)
 }
@@ -64,8 +63,7 @@ func (a *ServingEndpointsAPI) BuildLogs(ctx context.Context, request BuildLogsRe
 // Retrieve the logs associated with building the model's environment for a
 // given serving endpoint's served model.
 //
-// Retrieves the build logs associated with the provided served model. Please
-// note that this API is in preview and may change in the future.
+// Retrieves the build logs associated with the provided served model.
 func (a *ServingEndpointsAPI) BuildLogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*BuildLogsResponse, error) {
 	return a.impl.BuildLogs(ctx, BuildLogsRequest{
 		Name: name,
@@ -136,8 +134,7 @@ func (a *ServingEndpointsAPI) DeleteByName(ctx context.Context, name string) err
 // in Prometheus or OpenMetrics exposition format.
 //
 // Retrieves the metrics associated with the provided serving endpoint in either
-// Prometheus or OpenMetrics exposition format. Please note that this API is in
-// preview and may change in the future.
+// Prometheus or OpenMetrics exposition format.
 func (a *ServingEndpointsAPI) ExportMetrics(ctx context.Context, request ExportMetricsRequest) error {
 	return a.impl.ExportMetrics(ctx, request)
 }
@@ -146,8 +143,7 @@ func (a *ServingEndpointsAPI) ExportMetrics(ctx context.Context, request ExportM
 // in Prometheus or OpenMetrics exposition format.
 //
 // Retrieves the metrics associated with the provided serving endpoint in either
-// Prometheus or OpenMetrics exposition format. Please note that this API is in
-// preview and may change in the future.
+// Prometheus or OpenMetrics exposition format.
 func (a *ServingEndpointsAPI) ExportMetricsByName(ctx context.Context, name string) error {
 	return a.impl.ExportMetrics(ctx, ExportMetricsRequest{
 		Name: name,
@@ -178,8 +174,7 @@ func (a *ServingEndpointsAPI) List(ctx context.Context) (*ListEndpointsResponse,
 // Retrieve the most recent log lines associated with a given serving endpoint's
 // served model.
 //
-// Retrieves the service logs associated with the provided served model. Please
-// note that this API is in preview and may change in the future.
+// Retrieves the service logs associated with the provided served model.
 func (a *ServingEndpointsAPI) Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error) {
 	return a.impl.Logs(ctx, request)
 }
@@ -187,8 +182,7 @@ func (a *ServingEndpointsAPI) Logs(ctx context.Context, request LogsRequest) (*S
 // Retrieve the most recent log lines associated with a given serving endpoint's
 // served model.
 //
-// Retrieves the service logs associated with the provided served model. Please
-// note that this API is in preview and may change in the future.
+// Retrieves the service logs associated with the provided served model.
 func (a *ServingEndpointsAPI) LogsByNameAndServedModelName(ctx context.Context, name string, servedModelName string) (*ServerLogsResponse, error) {
 	return a.impl.Logs(ctx, LogsRequest{
 		Name: name,
diff --git a/service/endpoints/interface.go b/service/endpoints/interface.go
index 48585b554..8df1b64e9 100755
--- a/service/endpoints/interface.go
+++ b/service/endpoints/interface.go
@@ -6,8 +6,8 @@ import (
 	"context"
 )
 
-// The Serverless Real-Time Inference Serving Endpoints API allows you to
-// create, update, and delete model serving endpoints.
+// The Serving Endpoints API allows you to create, update, and delete model
+// serving endpoints.
 //
 // You can use a serving endpoint to serve models from the Databricks Model
 // Registry. Endpoints expose the underlying models as scalable REST API
@@ -25,7 +25,6 @@ type ServingEndpointsService interface {
 	// given serving endpoint's served model.
 	//
 	// Retrieves the build logs associated with the provided served model.
-	// Please note that this API is in preview and may change in the future.
 	BuildLogs(ctx context.Context, request BuildLogsRequest) (*BuildLogsResponse, error)
 
 	// Create a new serving endpoint.
@@ -38,8 +37,7 @@ type ServingEndpointsService interface {
 	// time in Prometheus or OpenMetrics exposition format.
 	//
 	// Retrieves the metrics associated with the provided serving endpoint in
-	// either Prometheus or OpenMetrics exposition format. Please note that this
-	// API is in preview and may change in the future.
+	// either Prometheus or OpenMetrics exposition format.
 	ExportMetrics(ctx context.Context, request ExportMetricsRequest) error
 
 	// Get a single serving endpoint.
@@ -54,7 +52,6 @@ type ServingEndpointsService interface {
 	// endpoint's served model.
 	//
 	// Retrieves the service logs associated with the provided served model.
-	// Please note that this API is in preview and may change in the future.
 	Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error)
 
 	// Query a serving endpoint with provided model input.
diff --git a/service/endpoints/model.go b/service/endpoints/model.go
index b0618190e..151468183 100755
--- a/service/endpoints/model.go
+++ b/service/endpoints/model.go
@@ -242,6 +242,14 @@ type ServedModelOutput struct {
 	ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"`
 	// Information corresponding to the state of the Served Model.
 	State *ServedModelState `json:"state,omitempty"`
+	// The workload size of the served model. The workload size corresponds to a
+	// range of provisioned concurrency that the compute will autoscale between.
+	// A single unit of provisioned concurrency can process one request at a
+	// time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency),
+	// "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64
+	// provisioned concurrency). If scale-to-zero is enabled, the lower bound of
+	// the provisioned concurrency for each workload size will be 0.
+	WorkloadSize string `json:"workload_size,omitempty"`
 }
 
 type ServedModelSpec struct {
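The logs operations above can be driven as follows; a sketch that prints recent service logs for one served model, where the Logs field on ServerLogsResponse is an assumption:

	package example

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sdk-go"
	)

	func printServedModelLogs(ctx context.Context, endpoint, servedModel string) error {
		w, err := databricks.NewWorkspaceClient()
		if err != nil {
			return err
		}
		// Most recent log lines for the served model behind the endpoint.
		logs, err := w.ServingEndpoints.LogsByNameAndServedModelName(ctx, endpoint, servedModel)
		if err != nil {
			return err
		}
		fmt.Println(logs.Logs) // field name is an assumption
		return nil
	}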
The default behavior is that the job runs + // only when triggered by clicking “Run Now” in the Jobs UI or sending + // an API request to `runNow`. + Trigger *TriggerSettings `json:"trigger,omitempty"` // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. WebhookNotifications *JobWebhookNotifications `json:"webhook_notifications,omitempty"` @@ -398,6 +403,21 @@ type ExportRunOutput struct { Views []ViewItem `json:"views,omitempty"` } +type FileArrivalTriggerSettings struct { + // If set, the trigger starts a run only after the specified amount of time + // has passed since the last time the trigger fired. The minimum allowed + // value is 60 seconds. + MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"` + // URL to be monitored for file arrivals. The path must point to the root or + // a subpath of the external location. + Url string `json:"url,omitempty"` + // If set, the trigger starts a run only after no file activity has occurred + // for the specified amount of time. This makes it possible to wait for a + // batch of incoming files to arrive before triggering a run. The minimum + // allowed value is 60 seconds. + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + // Get a single job type Get struct { // The canonical identifier of the job to retrieve information about. This @@ -515,6 +535,8 @@ type Job struct { // Settings for this job and all of its runs. These settings can be updated // using the `resetJob` method. Settings *JobSettings `json:"settings,omitempty"` + // History of the file arrival trigger associated with the job. + TriggerHistory *TriggerHistory `json:"trigger_history,omitempty"` } type JobCluster struct { @@ -602,6 +624,11 @@ type JobSettings struct { // An optional timeout applied to each run of this job. The default behavior // is to have no timeout. TimeoutSeconds int `json:"timeout_seconds,omitempty"` + // Trigger settings for the job. Can be used to trigger a run when new files + // arrive in an external location. The default behavior is that the job runs + // only when triggered by clicking “Run Now” in the Jobs UI or sending + // an API request to `runNow`. + Trigger *TriggerSettings `json:"trigger,omitempty"` // A collection of system notification IDs to notify when the run begins or // completes. The default behavior is to not send any system notifications. WebhookNotifications *JobWebhookNotifications `json:"webhook_notifications,omitempty"` @@ -1960,9 +1987,67 @@ type TaskDependenciesItem struct { TaskKey string `json:"task_key,omitempty"` } +type TriggerEvaluation struct { + // Human-readable description of the trigger evaluation result. Explains + // why the trigger evaluation triggered or did not trigger a run, or failed. + Description string `json:"description,omitempty"` + // The ID of the run that was triggered by the trigger evaluation. Only + // returned if a run was triggered. + RunId int64 `json:"run_id,omitempty"` + // Timestamp at which the trigger was evaluated. + Timestamp int64 `json:"timestamp,omitempty"` +} + +type TriggerHistory struct { + // The last time the trigger failed to evaluate. + LastFailed *TriggerEvaluation `json:"last_failed,omitempty"` + // The last time the trigger was evaluated but did not trigger a run. + LastNotTriggered *TriggerEvaluation `json:"last_not_triggered,omitempty"` + // The last time the run was triggered due to a file arrival.
+ LastTriggered *TriggerEvaluation `json:"last_triggered,omitempty"` +} + +type TriggerSettings struct { + // File arrival trigger settings. + FileArrival *FileArrivalTriggerSettings `json:"file_arrival,omitempty"` + // Whether this trigger is paused or not. + PauseStatus TriggerSettingsPauseStatus `json:"pause_status,omitempty"` +} + +// Whether this trigger is paused or not. +type TriggerSettingsPauseStatus string + +const TriggerSettingsPauseStatusPaused TriggerSettingsPauseStatus = `PAUSED` + +const TriggerSettingsPauseStatusUnpaused TriggerSettingsPauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (tsps *TriggerSettingsPauseStatus) String() string { + return string(*tsps) +} + +// Set raw string value and validate it against allowed values +func (tsps *TriggerSettingsPauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *tsps = TriggerSettingsPauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns TriggerSettingsPauseStatus to satisfy [pflag.Value] interface +func (tsps *TriggerSettingsPauseStatus) Type() string { + return "TriggerSettingsPauseStatus" +} + // This describes an enum type TriggerType string +// Indicates a run that is triggered by a file arrival. +const TriggerTypeFileArrival TriggerType = `FILE_ARRIVAL` + // One time triggers that fire a single run. This occurs when you trigger a single // run on demand through the UI or the API. const TriggerTypeOneTime TriggerType = `ONE_TIME` @@ -1982,11 +2067,11 @@ func (tt *TriggerType) String() string { // Set raw string value and validate it against allowed values func (tt *TriggerType) Set(v string) error { switch v { - case `ONE_TIME`, `PERIODIC`, `RETRY`: + case `FILE_ARRIVAL`, `ONE_TIME`, `PERIODIC`, `RETRY`: *tt = TriggerType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ONE_TIME", "PERIODIC", "RETRY"`, v) + return fmt.Errorf(`value "%s" is not one of "FILE_ARRIVAL", "ONE_TIME", "PERIODIC", "RETRY"`, v) } } diff --git a/service/mlflow/api.go b/service/mlflow/api.go index 4981a5572..cdeeff003 100755 --- a/service/mlflow/api.go +++ b/service/mlflow/api.go @@ -90,7 +90,7 @@ func (a *ExperimentsAPI) GetByExperimentId(ctx context.Context, experimentId str // // This endpoint will return deleted experiments, but prefers the active // experiment if an active and deleted experiment share the same name. If -// multiple deleted\nexperiments share the same name, the API will return one of +// multiple deleted experiments share the same name, the API will return one of // them. // // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name @@ -105,7 +105,7 @@ func (a *ExperimentsAPI) GetByName(ctx context.Context, request GetByNameRequest // // This endpoint will return deleted experiments, but prefers the active // experiment if an active and deleted experiment share the same name. If -// multiple deleted\nexperiments share the same name, the API will return one of +// multiple deleted experiments share the same name, the API will return one of // them. // // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name @@ -145,22 +145,24 @@ // Restores an experiment. // -// "Restore an experiment marked for deletion. This also restores\nassociated +// Restore an experiment marked for deletion. This also restores associated // metadata, runs, metrics, params, and tags.
If experiment uses FileStore, -// underlying\nartifacts associated with experiment are also restored.\n\nThrows -// `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was permanently -// deleted.", +// underlying artifacts associated with experiment are also restored. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was +// permanently deleted. func (a *ExperimentsAPI) Restore(ctx context.Context, request RestoreExperiment) error { return a.impl.Restore(ctx, request) } // Restores an experiment. // -// "Restore an experiment marked for deletion. This also restores\nassociated +// Restore an experiment marked for deletion. This also restores associated // metadata, runs, metrics, params, and tags. If experiment uses FileStore, -// underlying\nartifacts associated with experiment are also restored.\n\nThrows -// `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was permanently -// deleted.", +// underlying artifacts associated with experiment are also restored. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was +// permanently deleted. func (a *ExperimentsAPI) RestoreByExperimentId(ctx context.Context, experimentId string) error { return a.impl.Restore(ctx, RestoreExperiment{ ExperimentId: experimentId, @@ -460,8 +462,8 @@ func (a *MLflowRunsAPI) Get(ctx context.Context, request GetRunRequest) (*GetRun // Request Limits ------------------------------- A single JSON-serialized API // request may be up to 1 MB in size and contain: // -// * No more than 1000 metrics, params, and tags in total * Up to 1000 -// metrics\n- Up to 100 params * Up to 100 tags +// * No more than 1000 metrics, params, and tags in total * Up to 1000 metrics * +// Up to 100 params * Up to 100 tags // // For example, a valid request might contain 900 metrics, 50 params, and 50 // tags, but logging 900 metrics, 50 params, and 51 tags is invalid. diff --git a/service/mlflow/interface.go b/service/mlflow/interface.go index 284a3e29f..3e0fc9323 100755 --- a/service/mlflow/interface.go +++ b/service/mlflow/interface.go @@ -38,8 +38,8 @@ type ExperimentsService interface { // // This endpoint will return deleted experiments, but prefers the active // experiment if an active and deleted experiment share the same name. If - // multiple deleted\nexperiments share the same name, the API will return - // one of them. + // multiple deleted experiments share the same name, the API will return one + // of them. // // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name // exists. @@ -54,11 +54,12 @@ // Restores an experiment. // - // "Restore an experiment marked for deletion. This also - // restores\nassociated metadata, runs, metrics, params, and tags. If - // experiment uses FileStore, underlying\nartifacts associated with - // experiment are also restored.\n\nThrows `RESOURCE_DOES_NOT_EXIST` if - // experiment was never created or was permanently deleted.", + // Restore an experiment marked for deletion. This also restores associated + // metadata, runs, metrics, params, and tags. If experiment uses FileStore, + // underlying artifacts associated with experiment are also restored. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if experiment was never created or was + // permanently deleted. Restore(ctx context.Context, request RestoreExperiment) error // Search experiments.
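Circling back to the file-arrival trigger added to `service/jobs` above: the pieces compose into a single job-create call. This is a hedged sketch only — it reuses `w` and `ctx` from the earlier example with `service/jobs` imported as `jobs`; the external location URL, cluster ID, and notebook path are placeholders, and `JobTaskSettings`/`NotebookTask` are assumed to be the generated task types at this revision.

```go
created, err := w.Jobs.Create(ctx, jobs.CreateJob{
	Name: "file-arrival-ingest",
	Tasks: []jobs.JobTaskSettings{{
		TaskKey:           "ingest",
		ExistingClusterId: "0123-456789-abcdefgh", // placeholder cluster ID
		NotebookTask:      &jobs.NotebookTask{NotebookPath: "/Repos/me/ingest"},
	}},
	Trigger: &jobs.TriggerSettings{
		FileArrival: &jobs.FileArrivalTriggerSettings{
			// Must be the root or a subpath of an external location (placeholder).
			Url: "s3://my-bucket/landing/",
			// Wait for a batch of files to settle before firing; minimum is 60.
			WaitAfterLastChangeSeconds: 120,
		},
		PauseStatus: jobs.TriggerSettingsPauseStatusUnpaused,
	},
})
if err != nil {
	panic(err)
}
fmt.Println("created job:", created.JobId)
```

Once such a job exists, `Job.TriggerHistory` surfaces the last triggered, not-triggered, and failed evaluations described by the new `TriggerEvaluation` type.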
@@ -188,7 +189,7 @@ type MLflowRunsService interface { // API request may be up to 1 MB in size and contain: // // * No more than 1000 metrics, params, and tags in total * Up to 1000 - // metrics\n- Up to 100 params * Up to 100 tags + // metrics * Up to 100 params * Up to 100 tags // // For example, a valid request might contain 900 metrics, 50 params, and 50 // tags, but logging 900 metrics, 50 params, and 51 tags is invalid. diff --git a/service/pkg.go b/service/pkg.go index 7d0a6d28b..29fcbcff1 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -92,7 +92,7 @@ // // - [scim.AccountServicePrincipalsAPI]: Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms. // -// - [endpoints.ServingEndpointsAPI]: The Serverless Real-Time Inference Serving Endpoints API allows you to create, update, and delete model serving endpoints. +// - [endpoints.ServingEndpointsAPI]: The Serving Endpoints API allows you to create, update, and delete model serving endpoints. // // - [unitycatalog.SharesAPI]: Databricks Delta Sharing: Shares REST API. // diff --git a/service/secrets/api.go b/service/secrets/api.go index 92d1b522c..9657d15a6 100755 --- a/service/secrets/api.go +++ b/service/secrets/api.go @@ -212,11 +212,11 @@ func (a *SecretsAPI) ListSecretsByScope(ctx context.Context, scope string) (*Lis // * `WRITE` - Allowed to read and write to this secret scope. * `READ` - // Allowed to read this secret scope and list what secrets are available. // -// Note that in general, secret values can only be read from within a -// command\non a cluster (for example, through a notebook). There is no API to -// read the actual secret value material outside of a cluster. However, the -// user's permission will be applied based on who is executing the command, and -// they must have at least READ permission. +// Note that in general, secret values can only be read from within a command on +// a cluster (for example, through a notebook). There is no API to read the +// actual secret value material outside of a cluster. However, the user's +// permission will be applied based on who is executing the command, and they +// must have at least READ permission. // // Users must have the `MANAGE` permission to invoke this API. // diff --git a/service/secrets/interface.go b/service/secrets/interface.go index ac63bfeaa..8fc35923e 100755 --- a/service/secrets/interface.go +++ b/service/secrets/interface.go @@ -116,7 +116,7 @@ type SecretsService interface { // available. // // Note that in general, secret values can only be read from within a - // command\non a cluster (for example, through a notebook). There is no API + // command on a cluster (for example, through a notebook). There is no API // to read the actual secret value material outside of a cluster. However, // the user's permission will be applied based on who is executing the // command, and they must have at least READ permission. diff --git a/service/sql/api.go b/service/sql/api.go index f77e4f99f..d78749b88 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -25,10 +25,6 @@ func NewAlerts(client *client.DatabricksClient) *AlertsAPI { // a Databricks SQL object that periodically runs a query, evaluates a condition // of its result, and notifies one or more users and/or alert destinations if // the condition was met. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated.
Alert refresh schedules can be created, updated, fetched -// and deleted using Jobs API, e.g. :method:jobs/create. type AlertsAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(AlertsService) @@ -56,18 +52,6 @@ func (a *AlertsAPI) Create(ctx context.Context, request CreateAlert) (*Alert, er return a.impl.Create(ctx, request) } -// [DEPRECATED] Create a refresh schedule. -// -// Creates a new refresh schedule for an alert. -// -// **Note:** The structure of refresh schedules is subject to change. -// -// **Note:** This API is deprecated: Use :method:jobs/create to create a job -// with the alert. -func (a *AlertsAPI) CreateSchedule(ctx context.Context, request CreateRefreshSchedule) (*RefreshSchedule, error) { - return a.impl.CreateSchedule(ctx, request) -} - // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be @@ -88,31 +72,6 @@ func (a *AlertsAPI) DeleteByAlertId(ctx context.Context, alertId string) error { }) } -// [DEPRECATED] Delete a refresh schedule. -// -// Deletes an alert's refresh schedule. The refresh schedule specifies when to -// refresh and evaluate the associated query result. -// -// **Note:** This API is deprecated: Use :method:jobs/delete to delete a job for -// the alert. -func (a *AlertsAPI) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { - return a.impl.DeleteSchedule(ctx, request) -} - -// [DEPRECATED] Delete a refresh schedule. -// -// Deletes an alert's refresh schedule. The refresh schedule specifies when to -// refresh and evaluate the associated query result. -// -// **Note:** This API is deprecated: Use :method:jobs/delete to delete a job for -// the alert. -func (a *AlertsAPI) DeleteScheduleByAlertIdAndScheduleId(ctx context.Context, alertId string, scheduleId string) error { - return a.impl.DeleteSchedule(ctx, DeleteScheduleRequest{ - AlertId: alertId, - ScheduleId: scheduleId, - }) -} - // Get an alert. // // Gets an alert. @@ -129,34 +88,6 @@ func (a *AlertsAPI) GetByAlertId(ctx context.Context, alertId string) (*Alert, e }) } -// [DEPRECATED] Get an alert's subscriptions. -// -// Get the subscriptions for an alert. An alert subscription represents exactly -// one recipient being notified whenever the alert is triggered. The alert -// recipient is specified by either the `user` field or the `destination` field. -// The `user` field is ignored if `destination` is non-`null`. -// -// **Note:** This API is deprecated: Use :method:jobs/get to get the -// subscriptions associated with a job for an alert. -func (a *AlertsAPI) GetSubscriptions(ctx context.Context, request GetSubscriptionsRequest) ([]Subscription, error) { - return a.impl.GetSubscriptions(ctx, request) -} - -// [DEPRECATED] Get an alert's subscriptions. -// -// Get the subscriptions for an alert. An alert subscription represents exactly -// one recipient being notified whenever the alert is triggered. The alert -// recipient is specified by either the `user` field or the `destination` field. -// The `user` field is ignored if `destination` is non-`null`. -// -// **Note:** This API is deprecated: Use :method:jobs/get to get the -// subscriptions associated with a job for an alert. -func (a *AlertsAPI) GetSubscriptionsByAlertId(ctx context.Context, alertId string) ([]Subscription, error) { - return a.impl.GetSubscriptions(ctx, GetSubscriptionsRequest{ - AlertId: alertId, - }) -} - // Get alerts. // // Gets a list of alerts. 
@@ -217,71 +148,6 @@ func (a *AlertsAPI) GetByName(ctx context.Context, name string) (*Alert, error) return &alternatives[0], nil } -// [DEPRECATED] Get refresh schedules. -// -// Gets the refresh schedules for the specified alert. Alerts can have refresh -// schedules that specify when to refresh and evaluate the associated query -// result. -// -// **Note:** Although refresh schedules are returned in a list, only one refresh -// schedule per alert is currently supported. The structure of refresh schedules -// is subject to change. -// -// **Note:** This API is deprecated: Use :method:jobs/list to list jobs and -// filter by the alert. -func (a *AlertsAPI) ListSchedules(ctx context.Context, request ListSchedulesRequest) ([]RefreshSchedule, error) { - return a.impl.ListSchedules(ctx, request) -} - -// [DEPRECATED] Get refresh schedules. -// -// Gets the refresh schedules for the specified alert. Alerts can have refresh -// schedules that specify when to refresh and evaluate the associated query -// result. -// -// **Note:** Although refresh schedules are returned in a list, only one refresh -// schedule per alert is currently supported. The structure of refresh schedules -// is subject to change. -// -// **Note:** This API is deprecated: Use :method:jobs/list to list jobs and -// filter by the alert. -func (a *AlertsAPI) ListSchedulesByAlertId(ctx context.Context, alertId string) ([]RefreshSchedule, error) { - return a.impl.ListSchedules(ctx, ListSchedulesRequest{ - AlertId: alertId, - }) -} - -// [DEPRECATED] Subscribe to an alert. -// -// **Note:** This API is deprecated: Use :method:jobs/update to subscribe to a -// job for an alert. -func (a *AlertsAPI) Subscribe(ctx context.Context, request CreateSubscription) (*Subscription, error) { - return a.impl.Subscribe(ctx, request) -} - -// [DEPRECATED] Unsubscribe to an alert. -// -// Unsubscribes a user or a destination to an alert. -// -// **Note:** This API is deprecated: Use :method:jobs/update to unsubscribe to a -// job for an alert. -func (a *AlertsAPI) Unsubscribe(ctx context.Context, request UnsubscribeRequest) error { - return a.impl.Unsubscribe(ctx, request) -} - -// [DEPRECATED] Unsubscribe to an alert. -// -// Unsubscribes a user or a destination to an alert. -// -// **Note:** This API is deprecated: Use :method:jobs/update to unsubscribe to a -// job for an alert. -func (a *AlertsAPI) UnsubscribeByAlertIdAndSubscriptionId(ctx context.Context, alertId string, subscriptionId string) error { - return a.impl.Unsubscribe(ctx, UnsubscribeRequest{ - AlertId: alertId, - SubscriptionId: subscriptionId, - }) -} - // Update an alert. // // Updates an alert. @@ -302,10 +168,6 @@ func NewDashboards(client *client.DatabricksClient) *DashboardsAPI { // query IDs. The API can also be used to duplicate multiple dashboards at once // since you can get a dashboard definition with a GET request and then POST it // to create a new one. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated. Dashboard refresh schedules can be created, updated, -// fetched and deleted using Jobs API, e.g. :method:jobs/create. type DashboardsAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(DashboardsService) @@ -640,11 +502,7 @@ func NewQueries(client *client.DatabricksClient) *QueriesAPI { // These endpoints are used for CRUD operations on query definitions. 
Query // definitions include the target SQL warehouse, query text, name, description, -// tags, execution schedule, parameters, and visualizations. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated. Query refresh schedules can be created, updated, fetched -// and deleted using Jobs API, e.g. :method:jobs/create. +// tags, parameters, and visualizations. type QueriesAPI struct { // impl contains low-level REST API interface, that could be overridden // through WithImpl(QueriesService) @@ -906,8 +764,8 @@ func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionA // asynchronously, based on the `wait_timeout` setting. When set between 5-50 // seconds (default: 10) the call behaves synchronously and waits for results up // to the specified timeout; when set to `0s`, the call is asynchronous and -// responds immediately with a statement ID that can be used to fetch the -// results in a separate call. +// responds immediately with a statement ID that can be used to poll for status +// or fetch the results in a separate call. // // **Call mode: synchronous** // @@ -993,8 +851,8 @@ func NewStatementExecution(client *client.DatabricksClient) *StatementExecutionA // :method:statementexecution/getStatementResultChunkN request. // // When using this mode, each chunk may be fetched once, and in order. A chunk -// without a field `next_chunk_internal_link` indicates we reached the last -// chunk and all chunks have been fetched from the result set. +// without a field `next_chunk_internal_link` indicates the last chunk was +// reached and all chunks have been fetched from the result set. // // **Use case: large result sets with EXTERNAL_LINKS + ARROW_STREAM** // @@ -1087,9 +945,9 @@ func (a *StatementExecutionAPI) CancelExecution(ctx context.Context, request Can return a.impl.CancelExecution(ctx, request) } -// Execute an SQL statement. +// Execute a SQL statement. // -// Execute an SQL statement, and if flagged as such, await its result for a +// Execute a SQL statement, and if flagged as such, await its result for a // specified time. func (a *StatementExecutionAPI) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error) { return a.impl.ExecuteStatement(ctx, request) @@ -1097,8 +955,8 @@ func (a *StatementExecutionAPI) ExecuteStatement(ctx context.Context, request Ex // Get status, manifest, and result first chunk. // -// Polls for statement status; when status.state=SUCCEEDED will also return the -// result manifest, and the first chunk of result data. +// Polls for the statement's status; when `status.state=SUCCEEDED` it will also +// return the result manifest and the first chunk of the result data. // // **NOTE** This call currently may take up to 5 seconds to get the latest // status and result. @@ -1108,8 +966,8 @@ func (a *StatementExecutionAPI) GetStatement(ctx context.Context, request GetSta // Get status, manifest, and result first chunk. // -// Polls for statement status; when status.state=SUCCEEDED will also return the -// result manifest, and the first chunk of result data. +// Polls for the statement's status; when `status.state=SUCCEEDED` it will also +// return the result manifest and the first chunk of the result data. // // **NOTE** This call currently may take up to 5 seconds to get the latest // status and result. 
diff --git a/service/sql/impl.go b/service/sql/impl.go index 3394b2c13..878d33972 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -22,25 +22,12 @@ func (a *alertsImpl) Create(ctx context.Context, request CreateAlert) (*Alert, e return &alert, err } -func (a *alertsImpl) CreateSchedule(ctx context.Context, request CreateRefreshSchedule) (*RefreshSchedule, error) { - var refreshSchedule RefreshSchedule - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/refresh-schedules", request.AlertId) - err := a.client.Do(ctx, http.MethodPost, path, request, &refreshSchedule) - return &refreshSchedule, err -} - func (a *alertsImpl) Delete(ctx context.Context, request DeleteAlertRequest) error { path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) err := a.client.Do(ctx, http.MethodDelete, path, request, nil) return err } -func (a *alertsImpl) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/refresh-schedules/%v", request.AlertId, request.ScheduleId) - err := a.client.Do(ctx, http.MethodDelete, path, request, nil) - return err -} - func (a *alertsImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { var alert Alert path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) @@ -48,13 +35,6 @@ func (a *alertsImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, return &alert, err } -func (a *alertsImpl) GetSubscriptions(ctx context.Context, request GetSubscriptionsRequest) ([]Subscription, error) { - var subscriptionList []Subscription - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/subscriptions", request.AlertId) - err := a.client.Do(ctx, http.MethodGet, path, request, &subscriptionList) - return subscriptionList, err -} - func (a *alertsImpl) List(ctx context.Context) ([]Alert, error) { var alertList []Alert path := "/api/2.0/preview/sql/alerts" @@ -62,26 +42,6 @@ func (a *alertsImpl) List(ctx context.Context) ([]Alert, error) { return alertList, err } -func (a *alertsImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) ([]RefreshSchedule, error) { - var refreshScheduleList []RefreshSchedule - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/refresh-schedules", request.AlertId) - err := a.client.Do(ctx, http.MethodGet, path, request, &refreshScheduleList) - return refreshScheduleList, err -} - -func (a *alertsImpl) Subscribe(ctx context.Context, request CreateSubscription) (*Subscription, error) { - var subscription Subscription - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/subscriptions", request.AlertId) - err := a.client.Do(ctx, http.MethodPost, path, request, &subscription) - return &subscription, err -} - -func (a *alertsImpl) Unsubscribe(ctx context.Context, request UnsubscribeRequest) error { - path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v/subscriptions/%v", request.AlertId, request.SubscriptionId) - err := a.client.Do(ctx, http.MethodDelete, path, request, nil) - return err -} - func (a *alertsImpl) Update(ctx context.Context, request EditAlert) error { path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) err := a.client.Do(ctx, http.MethodPut, path, request, nil) diff --git a/service/sql/interface.go b/service/sql/interface.go index bab29dd53..1c708420a 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -10,10 +10,6 @@ import ( // a Databricks SQL object that periodically runs a query, evaluates a condition // of its result, and notifies one or more users and/or 
alert destinations if // the condition was met. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated. Alert refresh schedules can be created, updated, fetched -// and deleted using Jobs API, e.g. :method:jobs/create. type AlertsService interface { // Create an alert. @@ -23,16 +19,6 @@ type AlertsService interface { // alert destinations if the condition was met. Create(ctx context.Context, request CreateAlert) (*Alert, error) - // [DEPRECATED] Create a refresh schedule. - // - // Creates a new refresh schedule for an alert. - // - // **Note:** The structure of refresh schedules is subject to change. - // - // **Note:** This API is deprecated: Use :method:jobs/create to create a job - // with the alert. - CreateSchedule(ctx context.Context, request CreateRefreshSchedule) (*RefreshSchedule, error) - // Delete an alert. // // Deletes an alert. Deleted alerts are no longer accessible and cannot be @@ -40,65 +26,16 @@ type AlertsService interface { // to the trash. Delete(ctx context.Context, request DeleteAlertRequest) error - // [DEPRECATED] Delete a refresh schedule. - // - // Deletes an alert's refresh schedule. The refresh schedule specifies when - // to refresh and evaluate the associated query result. - // - // **Note:** This API is deprecated: Use :method:jobs/delete to delete a job - // for the alert. - DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error - // Get an alert. // // Gets an alert. Get(ctx context.Context, request GetAlertRequest) (*Alert, error) - // [DEPRECATED] Get an alert's subscriptions. - // - // Get the subscriptions for an alert. An alert subscription represents - // exactly one recipient being notified whenever the alert is triggered. The - // alert recipient is specified by either the `user` field or the - // `destination` field. The `user` field is ignored if `destination` is - // non-`null`. - // - // **Note:** This API is deprecated: Use :method:jobs/get to get the - // subscriptions associated with a job for an alert. - GetSubscriptions(ctx context.Context, request GetSubscriptionsRequest) ([]Subscription, error) - // Get alerts. // // Gets a list of alerts. List(ctx context.Context) ([]Alert, error) - // [DEPRECATED] Get refresh schedules. - // - // Gets the refresh schedules for the specified alert. Alerts can have - // refresh schedules that specify when to refresh and evaluate the - // associated query result. - // - // **Note:** Although refresh schedules are returned in a list, only one - // refresh schedule per alert is currently supported. The structure of - // refresh schedules is subject to change. - // - // **Note:** This API is deprecated: Use :method:jobs/list to list jobs and - // filter by the alert. - ListSchedules(ctx context.Context, request ListSchedulesRequest) ([]RefreshSchedule, error) - - // [DEPRECATED] Subscribe to an alert. - // - // **Note:** This API is deprecated: Use :method:jobs/update to subscribe to - // a job for an alert. - Subscribe(ctx context.Context, request CreateSubscription) (*Subscription, error) - - // [DEPRECATED] Unsubscribe to an alert. - // - // Unsubscribes a user or a destination to an alert. - // - // **Note:** This API is deprecated: Use :method:jobs/update to unsubscribe - // to a job for an alert. - Unsubscribe(ctx context.Context, request UnsubscribeRequest) error - // Update an alert. // // Updates an alert. @@ -110,10 +47,6 @@ type AlertsService interface { // query IDs. 
The API can also be used to duplicate multiple dashboards at once // since you can get a dashboard definition with a GET request and then POST it // to create a new one. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated. Dashboard refresh schedules can be created, updated, -// fetched and deleted using Jobs API, e.g. :method:jobs/create. type DashboardsService interface { // Create a dashboard object. @@ -202,11 +135,7 @@ type DbsqlPermissionsService interface { // These endpoints are used for CRUD operations on query definitions. Query // definitions include the target SQL warehouse, query text, name, description, -// tags, execution schedule, parameters, and visualizations. -// -// **Note**: Programmatic operations on refresh schedules via the Databricks SQL -// API are deprecated. Query refresh schedules can be created, updated, fetched -// and deleted using Jobs API, e.g. :method:jobs/create. +// tags, parameters, and visualizations. type QueriesService interface { // Create a new query definition. @@ -293,8 +222,8 @@ type QueryHistoryService interface { // asynchronously, based on the `wait_timeout` setting. When set between 5-50 // seconds (default: 10) the call behaves synchronously and waits for results up // to the specified timeout; when set to `0s`, the call is asynchronous and -// responds immediately with a statement ID that can be used to fetch the -// results in a separate call. +// responds immediately with a statement ID that can be used to poll for status +// or fetch the results in a separate call. // // **Call mode: synchronous** // @@ -380,8 +309,8 @@ type QueryHistoryService interface { // :method:statementexecution/getStatementResultChunkN request. // // When using this mode, each chunk may be fetched once, and in order. A chunk -// without a field `next_chunk_internal_link` indicates we reached the last -// chunk and all chunks have been fetched from the result set. +// without a field `next_chunk_internal_link` indicates the last chunk was +// reached and all chunks have been fetched from the result set. // // **Use case: large result sets with EXTERNAL_LINKS + ARROW_STREAM** // @@ -456,16 +385,16 @@ type StatementExecutionService interface { // status to see the terminal state. CancelExecution(ctx context.Context, request CancelExecutionRequest) error - // Execute an SQL statement. + // Execute a SQL statement. // - // Execute an SQL statement, and if flagged as such, await its result for a + // Execute a SQL statement, and if flagged as such, await its result for a // specified time. ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*ExecuteStatementResponse, error) // Get status, manifest, and result first chunk. // - // Polls for statement status; when status.state=SUCCEEDED will also return - // the result manifest, and the first chunk of result data. + // Polls for the statement's status; when `status.state=SUCCEEDED` it will + // also return the result manifest and the first chunk of the result data. // // **NOTE** This call currently may take up to 5 seconds to get the latest // status and result. diff --git a/service/sql/model.go b/service/sql/model.go index 949384adf..e102b8a06 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -65,9 +65,6 @@ type AlertOptions struct { // Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`, // `!=` Op string `json:"op"` - // Number of failures encountered during alert refresh. 
This counter is used - // for sending aggregated alert failure email notifications. - ScheduleFailures int `json:"schedule_failures,omitempty"` // Value used to compare in alert evaluation. Value string `json:"value"` } @@ -296,26 +293,6 @@ type CreateDashboardRequest struct { Widgets []Widget `json:"widgets,omitempty"` } -type CreateRefreshSchedule struct { - AlertId string `json:"-" url:"-"` - // Cron string representing the refresh schedule. - Cron string `json:"cron"` - // ID of the SQL warehouse to refresh with. If `null`, query's SQL warehouse - // will be used to refresh. - DataSourceId string `json:"data_source_id,omitempty"` -} - -type CreateSubscription struct { - // ID of the alert. - AlertId string `json:"alert_id" url:"-"` - // ID of the alert subscriber (if subscribing an alert destination). Alert - // destinations can be configured by admins through the UI. See - // [here](/sql/admin/alert-destinations.html). - DestinationId string `json:"destination_id,omitempty"` - // ID of the alert subscriber (if subscribing a user). - UserId int64 `json:"user_id,omitempty"` -} - type CreateWarehouseRequest struct { // The amount of time in minutes that a SQL Endpoint must be idle (i.e., no // RUNNING queries) before it is automatically stopped. @@ -481,72 +458,14 @@ type DeleteQueryRequest struct { QueryId string `json:"-" url:"-"` } -// [DEPRECATED] Delete a refresh schedule -type DeleteScheduleRequest struct { - AlertId string `json:"-" url:"-"` - - ScheduleId string `json:"-" url:"-"` -} - // Delete a warehouse type DeleteWarehouseRequest struct { // Required. Id of the SQL warehouse. Id string `json:"-" url:"-"` } -// Alert destination subscribed to the alert, if it exists. Alert destinations -// can be configured by admins through the UI. See [here]. -// -// [here]: https://docs.databricks.com/sql/admin/alert-destinations.html -type Destination struct { - // ID of the alert destination. - Id string `json:"id,omitempty"` - // Name of the alert destination. - Name string `json:"name,omitempty"` - // Type of the alert destination. - Type DestinationType `json:"type,omitempty"` -} - -// Type of the alert destination. -type DestinationType string - -const DestinationTypeEmail DestinationType = `email` - -const DestinationTypeHangoutsChat DestinationType = `hangouts_chat` - -const DestinationTypeMattermost DestinationType = `mattermost` - -const DestinationTypeMicrosoftTeams DestinationType = `microsoft_teams` - -const DestinationTypePagerduty DestinationType = `pagerduty` - -const DestinationTypeSlack DestinationType = `slack` - -const DestinationTypeWebhook DestinationType = `webhook` - -// String representation for [fmt.Print] -func (dt *DestinationType) String() string { - return string(*dt) -} - -// Set raw string value and validate it against allowed values -func (dt *DestinationType) Set(v string) error { - switch v { - case `email`, `hangouts_chat`, `mattermost`, `microsoft_teams`, `pagerduty`, `slack`, `webhook`: - *dt = DestinationType(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "email", "hangouts_chat", "mattermost", "microsoft_teams", "pagerduty", "slack", "webhook"`, v) - } -} - -// Type always returns DestinationType to satisfy [pflag.Value] interface -func (dt *DestinationType) Type() string { - return "DestinationType" -} - -// The fetch disposition provides for two modes of fetching results: `INLINE`, -// and `EXTERNAL_LINKS`. +// The fetch disposition provides two modes of fetching results: `INLINE` and +// `EXTERNAL_LINKS`. 
// // Statements executed with `INLINE` disposition will return result data inline, // in `JSON_ARRAY` format, in a series of chunks. If a given statement produces @@ -795,17 +714,17 @@ type EndpointTags struct { } type ExecuteStatementRequest struct { - // Applies given byte limit to execution and result size; byte counts based - // upon internal representations, and may not match measureable sizes in - // requested `format`. + // Applies the given byte limit to the statement's result size. Byte counts + // are based on internal representations and may not match measurable sizes + // in the requested `format`. ByteLimit int64 `json:"byte_limit,omitempty"` // Sets default catalog for statement execution, similar to [`USE CATALOG`] // in SQL. // // [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html Catalog string `json:"catalog,omitempty"` - // The fetch disposition provides for two modes of fetching results: - // `INLINE`, and `EXTERNAL_LINKS`. + // The fetch disposition provides two modes of fetching results: `INLINE` + // and `EXTERNAL_LINKS`. // // Statements executed with `INLINE` disposition will return result data // inline, in `JSON_ARRAY` format, in a series of chunks. If a given @@ -854,20 +773,19 @@ type ExecuteStatementRequest struct { // // When specifying `format=ARROW_STREAM`, results fetched through // `external_links` will be chunks of result data, formatted as Apache Arrow - // Stream. See - // [https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format] - // for more details. + // Stream. See [Apache Arrow Streaming Format] for more details. + // + // [Apache Arrow Streaming Format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format Format Format `json:"format,omitempty"` - // When called in synchronous mode (`wait_timeout > 0s`), determines action - // when timeout reached: + // When in synchronous mode with `wait_timeout > 0s` it determines the + // action taken when the timeout is reached: + // + // `CONTINUE` → the statement execution continues asynchronously and the + // call returns a statement ID immediately. // - // `CONTINUE` → statement execution continues asynchronously; the call - // returns a statement ID immediately. `CANCEL` → statement execution - // canceled; call returns immediately with `CANCELED` state. + // `CANCEL` → the statement execution is canceled and the call returns + // immediately with a `CANCELED` state. OnWaitTimeout TimeoutAction `json:"on_wait_timeout,omitempty"` - // Applies given row limit to execution and result set, identical in - // semantics to SQL term `LIMIT $N`. - RowLimit int64 `json:"row_limit,omitempty"` // Sets default schema for statement execution, similar to [`USE SCHEMA`] in // SQL. // @@ -875,9 +793,9 @@ type ExecuteStatementRequest struct { Schema string `json:"schema,omitempty"` // SQL statement to execute Statement string `json:"statement,omitempty"` - // Time that the API service will wait for the statement result, in format - // '{N}s'. N may be '0s' for asynchronous, or may wait between 5-50 - // seconds." + // The time in seconds the API service will wait for the statement's result + // set as `Ns`, where `N` can be set to 0 or to a value between 5 and 50. + // When set to '0s' the statement will execute in asynchronous mode. WaitTimeout string `json:"wait_timeout,omitempty"` // Warehouse upon which to execute a statement.
See also [What are SQL // warehouses?](/sql/admin/warehouse-type.html) @@ -943,9 +861,9 @@ type ExternalLink struct { // // When specifying `format=ARROW_STREAM`, results fetched through // `external_links` will be chunks of result data, formatted as Apache Arrow -// Stream. See -// [https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format] for -// more details. +// Stream. See [Apache Arrow Streaming Format] for more details. +// +// [Apache Arrow Streaming Format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format type Format string const FormatArrowStream Format = `ARROW_STREAM` @@ -1031,11 +949,6 @@ type GetStatementResultChunkNRequest struct { StatementId string `json:"-" url:"-"` } -// [DEPRECATED] Get an alert's subscriptions -type GetSubscriptionsRequest struct { - AlertId string `json:"-" url:"-"` -} - // Get warehouse info type GetWarehouseRequest struct { // Required. Id of the SQL warehouse. @@ -1237,9 +1150,6 @@ type ListQueriesRequest struct { // // - `created_at`: The timestamp the query was created. // - // - `schedule`: [DEPRECATED] Sorting results by refresh schedule is - // deprecated. Use :method:jobs/list to list jobs and filter for a query. - // // - `runtime`: The time it took to run this query. This is blank for // parameterized queries. A blank value is treated as the highest value for // sorting. @@ -1288,11 +1198,6 @@ type ListResponse struct { Results []Dashboard `json:"results,omitempty"` } -// [DEPRECATED] Get refresh schedules -type ListSchedulesRequest struct { - AlertId string `json:"-" url:"-"` -} - // List warehouses type ListWarehousesRequest struct { // Service Principal which will be used to fetch the list of endpoints. If @@ -1574,8 +1479,6 @@ type Query struct { // A SHA-256 hash of the query text along with the authenticated user ID. QueryHash string `json:"query_hash,omitempty"` - Schedule *QueryInterval `json:"schedule,omitempty"` - Tags []string `json:"tags,omitempty"` // The timestamp at which this query was last updated. UpdatedAt string `json:"updated_at,omitempty"` @@ -1603,11 +1506,6 @@ type QueryEditContent struct { Query string `json:"query,omitempty"` QueryId string `json:"-" url:"-"` - // JSON object that describes the scheduled execution frequency. A schedule - // object includes `interval`, `time`, `day_of_week`, and `until` fields. If - // a scheduled is supplied, then only `interval` is required. All other - // field can be `null`. - Schedule *QueryInterval `json:"schedule,omitempty"` } // A filter to limit query history results. This field is optional. @@ -1670,17 +1568,6 @@ type QueryInfo struct { WarehouseId string `json:"warehouse_id,omitempty"` } -type QueryInterval struct { - // For weekly runs, the day of the week to start the run. - DayOfWeek string `json:"day_of_week,omitempty"` - // Integer number of seconds between runs. - Interval int `json:"interval,omitempty"` - // For daily, weekly, and monthly runs, the time of day to start the run. - Time string `json:"time,omitempty"` - // A date after which this schedule no longer applies. - Until string `json:"until,omitempty"` -} - type QueryList struct { // The total number of queries. Count int `json:"count,omitempty"` @@ -1774,11 +1661,6 @@ type QueryPostContent struct { Parent string `json:"parent,omitempty"` // The text of the query. Query string `json:"query,omitempty"` - // JSON object that describes the scheduled execution frequency. A schedule - // object includes `interval`, `time`, `day_of_week`, and `until` fields. 
If - // a scheduled is supplied, then only `interval` is required. All other - // field can be `null`. - Schedule *QueryInterval `json:"schedule,omitempty"` } // Type of statement for this query @@ -1888,16 +1770,6 @@ func (qs *QueryStatus) Type() string { return "QueryStatus" } -type RefreshSchedule struct { - // Cron string representing the refresh schedule. - Cron string `json:"cron,omitempty"` - // ID of the SQL warehouse to refresh with. If `null`, query's SQL warehouse - // will be used to refresh. - DataSourceId string `json:"data_source_id,omitempty"` - // ID of the refresh schedule. - Id string `json:"id,omitempty"` -} - type RepeatedEndpointConfPairs struct { // Deprecated: Use configuration_pairs ConfigPair []EndpointConfPair `json:"config_pair,omitempty"` @@ -1967,9 +1839,9 @@ type ResultManifest struct { // // When specifying `format=ARROW_STREAM`, results fetched through // `external_links` will be chunks of result data, formatted as Apache Arrow - // Stream. See - // [https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format] - // for more details. + // Stream. See [Apache Arrow Streaming Format] for more details. + // + // [Apache Arrow Streaming Format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format Format Format `json:"format,omitempty"` // Schema is an ordered list of column descriptions. Schema *ResultSchema `json:"schema,omitempty"` @@ -2293,20 +2165,6 @@ type StopRequest struct { Id string `json:"-" url:"-"` } -type Subscription struct { - // ID of the alert. - AlertId string `json:"alert_id,omitempty"` - // Alert destination subscribed to the alert, if it exists. Alert - // destinations can be configured by admins through the UI. See [here]. - // - // [here]: https://docs.databricks.com/sql/admin/alert-destinations.html - Destination *Destination `json:"destination,omitempty"` - // ID of the alert subscription. - Id string `json:"id,omitempty"` - - User *User `json:"user,omitempty"` -} - type Success struct { Message SuccessMessage `json:"message,omitempty"` } @@ -2567,12 +2425,14 @@ type TimeRange struct { StartTimeMs int `json:"start_time_ms,omitempty"` } -// When called in synchronous mode (`wait_timeout > 0s`), determines action when -// timeout reached: +// When in synchronous mode with `wait_timeout > 0s` it determines the action +// taken when the timeout is reached: +// +// `CONTINUE` → the statement execution continues asynchronously and the call +// returns a statement ID immediately. // -// `CONTINUE` → statement execution continues asynchronously; the call returns -// a statement ID immediately. `CANCEL` → statement execution canceled; call -// returns immediately with `CANCELED` state. +// `CANCEL` → the statement execution is canceled and the call returns +// immediately with a `CANCELED` state. type TimeoutAction string const TimeoutActionCancel TimeoutAction = `CANCEL` @@ -2615,13 +2475,6 @@ type TransferOwnershipRequest struct { ObjectType OwnableObjectType `json:"-" url:"-"` } -// [DEPRECATED] Unsubscribe to an alert -type UnsubscribeRequest struct { - AlertId string `json:"-" url:"-"` - - SubscriptionId string `json:"-" url:"-"` -} - type User struct { Email string `json:"email,omitempty"` diff --git a/service/unitycatalog/model.go b/service/unitycatalog/model.go index 737217aa7..cc797f2dc 100755 --- a/service/unitycatalog/model.go +++ b/service/unitycatalog/model.go @@ -65,6 +65,11 @@ type CatalogInfo struct { CreatedAt int64 `json:"created_at,omitempty"` // Username of catalog creator. 
CreatedBy string `json:"created_by,omitempty"` + + EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"` + // Whether auto maintenance should be enabled for this object and objects + // under it. + EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"` // Unique identifier of parent metastore. MetastoreId string `json:"metastore_id,omitempty"` // Name of catalog. @@ -455,10 +460,12 @@ type CreateRecipient struct { AuthenticationType AuthenticationType `json:"authentication_type"` // Description about the recipient. Comment string `json:"comment,omitempty"` - // The global Unity Catalog metastore id provided by the data recipient.\n + // The global Unity Catalog metastore id provided by the data recipient. + // // This field is required when the __authentication_type__ is - // **DATABRICKS**.\n The identifier is of format - // __cloud__:__region__:__metastore-uuid__. + // **DATABRICKS**. + // + // The identifier is of format __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId any `json:"data_recipient_global_metastore_id,omitempty"` // IP Access List IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` @@ -466,7 +473,7 @@ type CreateRecipient struct { Name string `json:"name"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` - // Recipient properties as map of string key-value pairs.\n + // Recipient properties as map of string key-value pairs. PropertiesKvpairs any `json:"properties_kvpairs,omitempty"` // The one-time sharing code provided by the data recipient. This field is // required when the __authentication_type__ is **DATABRICKS**. @@ -646,7 +653,8 @@ type DeleteStorageCredentialRequest struct { // Delete a table constraint type DeleteTableConstraintRequest struct { - // If true, try deleting all child constraints of the current constraint.\n + // If true, try deleting all child constraints of the current constraint. + // // If false, reject this operation if the current constraint has any child // constraints. Cascade bool `json:"-" url:"cascade"` @@ -671,6 +679,47 @@ type Dependency struct { Table *TableDependency `json:"table,omitempty"` } +type EffectiveAutoMaintenanceFlag struct { + // The name of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromName string `json:"inherited_from_name,omitempty"` + // The type of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromType EffectiveAutoMaintenanceFlagInheritedFromType `json:"inherited_from_type,omitempty"` + // Whether auto maintenance should be enabled for this object and objects + // under it. + Value EnableAutoMaintenance `json:"value"` +} + +// The type of the object from which the flag was inherited. If there was no +// inheritance, this field is left blank. 
+type EffectiveAutoMaintenanceFlagInheritedFromType string + +const EffectiveAutoMaintenanceFlagInheritedFromTypeCatalog EffectiveAutoMaintenanceFlagInheritedFromType = `CATALOG` + +const EffectiveAutoMaintenanceFlagInheritedFromTypeSchema EffectiveAutoMaintenanceFlagInheritedFromType = `SCHEMA` + +// String representation for [fmt.Print] +func (eamfift *EffectiveAutoMaintenanceFlagInheritedFromType) String() string { + return string(*eamfift) +} + +// Set raw string value and validate it against allowed values +func (eamfift *EffectiveAutoMaintenanceFlagInheritedFromType) Set(v string) error { + switch v { + case `CATALOG`, `SCHEMA`: + *eamfift = EffectiveAutoMaintenanceFlagInheritedFromType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CATALOG", "SCHEMA"`, v) + } +} + +// Type always returns EffectiveAutoMaintenanceFlagInheritedFromType to satisfy [pflag.Value] interface +func (eamfift *EffectiveAutoMaintenanceFlagInheritedFromType) Type() string { + return "EffectiveAutoMaintenanceFlagInheritedFromType" +} + type EffectivePermissionsList struct { // The privileges conveyed to each principal (either directly or via // inheritance) @@ -678,11 +727,13 @@ type EffectivePermissionsList struct { } type EffectivePrivilege struct { - // The full name of the object that conveys this privilege via - // inheritance.\n This field is omitted when privilege is not inherited - // (it's assigned to the securable itself). + // The full name of the object that conveys this privilege via inheritance. + // + // This field is omitted when privilege is not inherited (it's assigned to + // the securable itself). InheritedFromName string `json:"inherited_from_name,omitempty"` - // The type of the object that conveys this privilege via inheritance.\n + // The type of the object that conveys this privilege via inheritance. + // // This field is omitted when privilege is not inherited (it's assigned to // the securable itself). InheritedFromType SecurableType `json:"inherited_from_type,omitempty"` @@ -698,6 +749,37 @@ type EffectivePrivilegeAssignment struct { Privileges []EffectivePrivilege `json:"privileges,omitempty"` } +// Whether auto maintenance should be enabled for this object and objects under +// it. +type EnableAutoMaintenance string + +const EnableAutoMaintenanceDisable EnableAutoMaintenance = `DISABLE` + +const EnableAutoMaintenanceEnable EnableAutoMaintenance = `ENABLE` + +const EnableAutoMaintenanceInherit EnableAutoMaintenance = `INHERIT` + +// String representation for [fmt.Print] +func (eam *EnableAutoMaintenance) String() string { + return string(*eam) +} + +// Set raw string value and validate it against allowed values +func (eam *EnableAutoMaintenance) Set(v string) error { + switch v { + case `DISABLE`, `ENABLE`, `INHERIT`: + *eam = EnableAutoMaintenance(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISABLE", "ENABLE", "INHERIT"`, v) + } +} + +// Type always returns EnableAutoMaintenance to satisfy [pflag.Value] interface +func (eam *EnableAutoMaintenance) Type() string { + return "EnableAutoMaintenance" +} + type ExternalLocationInfo struct { // User-provided free-form text description. Comment string `json:"comment,omitempty"` @@ -1642,10 +1724,12 @@ type RecipientInfo struct { CreatedAt int64 `json:"created_at,omitempty"` // Username of recipient creator. 
CreatedBy string `json:"created_by,omitempty"` - // The global Unity Catalog metastore id provided by the data recipient.\n + // The global Unity Catalog metastore id provided by the data recipient. + // // This field is only present when the __authentication_type__ is - // **DATABRICKS**.\n The identifier is of format - // __cloud__:__region__:__metastore-uuid__. + // **DATABRICKS**. + // + // The identifier is of format __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId any `json:"data_recipient_global_metastore_id,omitempty"` // IP Access List IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` @@ -1656,7 +1740,7 @@ Name string `json:"name,omitempty"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` - // Recipient properties as map of string key-value pairs.\n + // Recipient properties as map of string key-value pairs. PropertiesKvpairs any `json:"properties_kvpairs,omitempty"` // Cloud region of the recipient's Unity Catalog Metastore. This field is // only present when the __authentication_type__ is **DATABRICKS**. @@ -1737,6 +1821,11 @@ type SchemaInfo struct { CreatedAt int64 `json:"created_at,omitempty"` // Username of schema creator. CreatedBy string `json:"created_by,omitempty"` + + EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"` + // Whether auto maintenance should be enabled for this object and objects + // under it. + EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"` // Full name of schema, in form of __catalog_name__.__schema_name__. FullName string `json:"full_name,omitempty"` // Unique identifier of parent metastore. @@ -2013,6 +2102,11 @@ type TableInfo struct { DeletedAt int64 `json:"deleted_at,omitempty"` // Information pertaining to current state of the delta table. DeltaRuntimePropertiesKvpairs any `json:"delta_runtime_properties_kvpairs,omitempty"` + + EffectiveAutoMaintenanceFlag *EffectiveAutoMaintenanceFlag `json:"effective_auto_maintenance_flag,omitempty"` + // Whether auto maintenance should be enabled for this object and objects + // under it. + EnableAutoMaintenance EnableAutoMaintenance `json:"enable_auto_maintenance,omitempty"` // Full name of table, in form of // __catalog_name__.__schema_name__.__table_name__ FullName string `json:"full_name,omitempty"` @@ -2240,10 +2334,11 @@ type UpdateRecipient struct { Name string `json:"name,omitempty" url:"-"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` - // Recipient properties as map of string key-value pairs.\n When provided in - // update request, the specified properties will override the existing - // properties. To add and remove properties, one would need to perform a - // read-modify-write. + // Recipient properties as map of string key-value pairs. + // + // When provided in update request, the specified properties will override + // the existing properties. To add and remove properties, one would need to + // perform a read-modify-write. PropertiesKvpairs any `json:"properties_kvpairs,omitempty"` } diff --git a/service/workspace/api.go b/service/workspace/api.go index c210005de..54605c4bc 100755 --- a/service/workspace/api.go +++ b/service/workspace/api.go @@ -173,7 +173,7 @@ func (a *WorkspaceAPI) GetByPath(ctx context.Context, name string) (*ObjectInfo, // input path, this call returns an error `RESOURCE_ALREADY_EXISTS`.
diff --git a/service/workspace/api.go b/service/workspace/api.go
index c210005de..54605c4bc 100755
--- a/service/workspace/api.go
+++ b/service/workspace/api.go
@@ -173,7 +173,7 @@ func (a *WorkspaceAPI) GetByPath(ctx context.Context, name string) (*ObjectInfo,
 // input path, this call returns an error `RESOURCE_ALREADY_EXISTS`.
 //
 // Note that if this operation fails it may have succeeded in creating some of
-// the necessary\nparrent directories.
+// the necessary parent directories.
 func (a *WorkspaceAPI) Mkdirs(ctx context.Context, request Mkdirs) error {
 	return a.impl.Mkdirs(ctx, request)
 }
@@ -185,7 +185,7 @@ func (a *WorkspaceAPI) Mkdirs(ctx context.Context, request Mkdirs) error {
 // input path, this call returns an error `RESOURCE_ALREADY_EXISTS`.
 //
 // Note that if this operation fails it may have succeeded in creating some of
-// the necessary\nparrent directories.
+// the necessary parent directories.
 func (a *WorkspaceAPI) MkdirsByPath(ctx context.Context, path string) error {
 	return a.impl.Mkdirs(ctx, Mkdirs{
 		Path: path,
diff --git a/service/workspace/interface.go b/service/workspace/interface.go
index 63ff3b0e7..ba18a8eb6 100755
--- a/service/workspace/interface.go
+++ b/service/workspace/interface.go
@@ -67,6 +67,6 @@ type WorkspaceService interface {
 	// the input path, this call returns an error `RESOURCE_ALREADY_EXISTS`.
 	//
 	// Note that if this operation fails it may have succeeded in creating some
-	// of the necessary\nparrent directories.
+	// of the necessary parent directories.
 	Mkdirs(ctx context.Context, request Mkdirs) error
 }
diff --git a/workspace_client.go b/workspace_client.go
index cc96ab185..95d3fd10e 100755
--- a/workspace_client.go
+++ b/workspace_client.go
@@ -38,10 +38,6 @@ type WorkspaceClient struct {
 	// is a Databricks SQL object that periodically runs a query, evaluates a
 	// condition of its result, and notifies one or more users and/or alert
 	// destinations if the condition was met.
-	//
-	// **Note**: Programmatic operations on refresh schedules via the Databricks
-	// SQL API are deprecated. Alert refresh schedules can be created, updated,
-	// fetched and deleted using Jobs API, e.g. :method:jobs/create.
 	Alerts *sql.AlertsAPI
 
 	// A catalog is the first layer of Unity Catalog’s three-level namespace.
@@ -121,10 +117,6 @@ type WorkspaceClient struct {
 	// collection of related query IDs. The API can also be used to duplicate
 	// multiple dashboards at once since you can get a dashboard definition with
 	// a GET request and then POST it to create a new one.
-	//
-	// **Note**: Programmatic operations on refresh schedules via the Databricks
-	// SQL API are deprecated. Dashboard refresh schedules can be created,
-	// updated, fetched and deleted using Jobs API, e.g. :method:jobs/create.
 	Dashboards *sql.DashboardsAPI
 
 	// This API is provided to assist you in making new query objects. When
@@ -388,11 +380,7 @@ type WorkspaceClient struct {
 	// These endpoints are used for CRUD operations on query definitions. Query
 	// definitions include the target SQL warehouse, query text, name,
-	// description, tags, execution schedule, parameters, and visualizations.
-	//
-	// **Note**: Programmatic operations on refresh schedules via the Databricks
-	// SQL API are deprecated. Query refresh schedules can be created, updated,
-	// fetched and deleted using Jobs API, e.g. :method:jobs/create.
+	// description, tags, parameters, and visualizations.
 	Queries *sql.QueriesAPI
 
 	// Access the history of queries through SQL warehouses.
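Since a failed Mkdirs may still have created some of the parent directories, directory creation is best treated as idempotent: per the doc comment patched above, only a non-directory object at a prefix of the path produces `RESOURCE_ALREADY_EXISTS`, so retrying the same call is harmless. A small usage sketch of MkdirsByPath; client construction is omitted and the path is illustrative:

package examples

import (
	"context"

	databricks "github.com/databricks/databricks-sdk-go"
)

// ensureDir creates every missing directory along the path in one call.
func ensureDir(ctx context.Context, w *databricks.WorkspaceClient) error {
	// Safe to re-run after a partial failure: directories that already
	// exist are not an error, only a conflicting non-directory object is.
	return w.Workspace.MkdirsByPath(ctx, "/Shared/sdk-examples/output")
}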
@@ -451,8 +439,8 @@ type WorkspaceClient struct {
 	// data by accident.
 	ServicePrincipals *scim.ServicePrincipalsAPI
 
-	// The Serverless Real-Time Inference Serving Endpoints API allows you to
-	// create, update, and delete model serving endpoints.
+	// The Serving Endpoints API allows you to create, update, and delete model
+	// serving endpoints.
 	//
 	// You can use a serving endpoint to serve models from the Databricks Model
 	// Registry. Endpoints expose the underlying models as scalable REST API
@@ -492,7 +480,7 @@ type WorkspaceClient struct {
 	// 5-50 seconds (default: 10) the call behaves synchronously and waits for
 	// results up to the specified timeout; when set to `0s`, the call is
 	// asynchronous and responds immediately with a statement ID that can be
-	// used to fetch the results in a separate call.
+	// used to poll for status or fetch the results in a separate call.
 	//
 	// **Call mode: synchronous**
 	//
@@ -581,8 +569,8 @@ type WorkspaceClient struct {
 	// :method:statementexecution/getStatementResultChunkN request.
 	//
 	// When using this mode, each chunk may be fetched once, and in order. A
-	// chunk without a field `next_chunk_internal_link` indicates we reached the
-	// last chunk and all chunks have been fetched from the result set.
+	// chunk without a field `next_chunk_internal_link` indicates the last chunk
+	// was reached and all chunks have been fetched from the result set.
 	//
 	// **Use case: large result sets with EXTERNAL_LINKS + ARROW_STREAM**
 	//
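To make the asynchronous call mode concrete: with WaitTimeout set to `0s`, ExecuteStatement returns a statement ID immediately, which is then polled via GetStatement until a terminal state. A sketch assuming the generated sql.StatementExecution surface; the request and field names are best-effort and not verified against this exact revision:

package examples

import (
	"context"
	"fmt"
	"time"

	databricks "github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/sql"
)

func runStatementAsync(ctx context.Context, w *databricks.WorkspaceClient, warehouseID string) error {
	// WaitTimeout "0s" selects the asynchronous mode described above:
	// the call responds immediately with a statement ID.
	resp, err := w.StatementExecution.ExecuteStatement(ctx, sql.ExecuteStatementRequest{
		WarehouseId: warehouseID,
		Statement:   "SELECT 1",
		WaitTimeout: "0s",
	})
	if err != nil {
		return err
	}
	// Poll the statement ID until execution leaves PENDING/RUNNING.
	for {
		st, err := w.StatementExecution.GetStatement(ctx, sql.GetStatementRequest{
			StatementId: resp.StatementId,
		})
		if err != nil {
			return err
		}
		switch st.Status.State {
		case sql.StatementStatePending, sql.StatementStateRunning:
			time.Sleep(5 * time.Second)
		case sql.StatementStateSucceeded:
			fmt.Println("statement succeeded")
			return nil
		default:
			return fmt.Errorf("statement ended in state %s", st.Status.State)
		}
	}
}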