From 05e57f9e2dacc4a16ffa21d72ef2d98b32e2c6dc Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Tue, 17 Dec 2024 12:19:42 +0100 Subject: [PATCH] [Release] Release v0.38.0 (#391) ### API Changes: * Added `accountClient.federationPolicy()` service and `accountClient.servicePrincipalFederationPolicy()` service. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterAttributes`. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterDetails`. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterSpec`. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.CreateCluster`. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.EditCluster`. * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.UpdateClusterResource`. * Added `updateParameterSyntax` field for `com.databricks.sdk.service.dashboards.MigrateDashboardRequest`. * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.RunTask`. * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.SubmitTask`. * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.Task`. * Changed `daysOfWeek` field for `com.databricks.sdk.service.pipelines.RestartWindow` to type `com.databricks.sdk.service.pipelines.RestartWindowDaysOfWeekList` class. OpenAPI SHA: a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d, Date: 2024-12-16 --- .codegen/_openapi_sha | 2 +- .gitattributes | 21 +++ CHANGELOG.md | 20 +++ databricks-sdk-java/pom.xml | 2 +- .../com/databricks/sdk/AccountClient.java | 132 ++++++++++++++++ .../catalog/GetBindingsSecurableType.java | 6 +- .../catalog/UpdateBindingsSecurableType.java | 6 +- .../service/compute/ClusterAttributes.java | 85 ++++++++++- .../sdk/service/compute/ClusterDetails.java | 85 ++++++++++- .../sdk/service/compute/ClusterSpec.java | 85 ++++++++++- .../sdk/service/compute/CreateCluster.java | 85 ++++++++++- .../sdk/service/compute/DataSecurityMode.java | 25 ++- .../sdk/service/compute/EditCluster.java | 85 ++++++++++- .../databricks/sdk/service/compute/Kind.java | 18 +++ .../compute/UpdateClusterResource.java | 85 ++++++++++- .../dashboards/MigrateDashboardRequest.java | 22 ++- .../databricks/sdk/service/jobs/BaseRun.java | 3 +- .../service/jobs/CleanRoomTaskRunState.java | 2 +- .../service/jobs/CleanRoomsNotebookTask.java | 94 ++++++++++++ .../sdk/service/jobs/JobsHealthMetric.java | 16 +- .../sdk/service/jobs/JobsHealthRule.java | 8 +- .../com/databricks/sdk/service/jobs/Run.java | 3 +- .../databricks/sdk/service/jobs/RunTask.java | 20 +++ .../sdk/service/jobs/SubmitTask.java | 22 ++- .../com/databricks/sdk/service/jobs/Task.java | 22 ++- .../sdk/service/jobs/TriggerType.java | 3 +- .../oauth2/AccountFederationPolicyAPI.java | 120 +++++++++++++++ .../oauth2/AccountFederationPolicyImpl.java | 70 +++++++++ .../AccountFederationPolicyService.java | 72 +++++++++ .../CreateAccountFederationPolicyRequest.java | 65 ++++++++ ...rvicePrincipalFederationPolicyRequest.java | 82 ++++++++++ .../DeleteAccountFederationPolicyRequest.java | 44 ++++++ ...rvicePrincipalFederationPolicyRequest.java | 60 ++++++++ .../sdk/service/oauth2/FederationPolicy.java | 122 +++++++++++++++ .../GetAccountFederationPolicyRequest.java | 44 ++++++ ...rvicePrincipalFederationPolicyRequest.java | 59 +++++++ 
.../ListAccountFederationPoliciesRequest.java | 62 ++++++++ .../ListFederationPoliciesResponse.java | 60 ++++++++ ...icePrincipalFederationPoliciesRequest.java | 79 ++++++++++ .../service/oauth2/OidcFederationPolicy.java | 123 +++++++++++++++ .../ServicePrincipalFederationPolicyAPI.java | 144 ++++++++++++++++++ .../ServicePrincipalFederationPolicyImpl.java | 81 ++++++++++ ...rvicePrincipalFederationPolicyService.java | 76 +++++++++ .../UpdateAccountFederationPolicyRequest.java | 81 ++++++++++ ...rvicePrincipalFederationPolicyRequest.java | 97 ++++++++++++ .../sdk/service/pipelines/RestartWindow.java | 7 +- examples/docs/pom.xml | 2 +- examples/spring-boot-oauth-u2m-demo/pom.xml | 2 +- pom.xml | 2 +- shaded/pom.xml | 2 +- 50 files changed, 2429 insertions(+), 84 deletions(-) create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Kind.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomsNotebookTask.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyAPI.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyImpl.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyService.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateAccountFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteAccountFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/FederationPolicy.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetAccountFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetServicePrincipalFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListAccountFederationPoliciesRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListFederationPoliciesResponse.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalFederationPoliciesRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OidcFederationPolicy.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyAPI.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyImpl.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyService.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateAccountFederationPolicyRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateServicePrincipalFederationPolicyRequest.java diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 68cd2f4be..8622b29ca 100644 --- a/.codegen/_openapi_sha +++ 
b/.codegen/_openapi_sha @@ -1 +1 @@ -7016dcbf2e011459416cf408ce21143bcc4b3a25 \ No newline at end of file +a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 7b5558d02..911142118 100755 --- a/.gitattributes +++ b/.gitattributes @@ -694,6 +694,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePro databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstanceProfilesAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstanceProfilesImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstanceProfilesService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Kind.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Language.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/LibrariesAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/LibrariesImpl.java linguist-generated=true @@ -999,6 +1000,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CancelRunRespo databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunLifeCycleState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunResultState.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomsNotebookTask.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/ClusterInstance.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/ClusterSpec.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Condition.java linguist-generated=true @@ -1505,22 +1507,31 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/UpdateRun.java l databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/UpdateRunResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/UpdateRunStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/UpdateWebhookResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyImpl.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateAccountFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateCustomAppIntegration.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateCustomAppIntegrationOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreatePublishedAppIntegration.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreatePublishedAppIntegrationOutput.java 
linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalSecretRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalSecretResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CustomAppIntegrationAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CustomAppIntegrationImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CustomAppIntegrationService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DataPlaneInfo.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteAccountFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteCustomAppIntegrationOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteCustomAppIntegrationRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeletePublishedAppIntegrationOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeletePublishedAppIntegrationRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalSecretRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/FederationPolicy.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetAccountFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetCustomAppIntegrationOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetCustomAppIntegrationRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetCustomAppIntegrationsOutput.java linguist-generated=true @@ -1528,27 +1539,37 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetPublished databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetPublishedAppIntegrationRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetPublishedAppIntegrationsOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetPublishedAppsOutput.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetServicePrincipalFederationPolicyRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListAccountFederationPoliciesRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListCustomAppIntegrationsRequest.java linguist-generated=true 
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListFederationPoliciesResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListOAuthPublishedAppsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListPublishedAppIntegrationsRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalFederationPoliciesRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalSecretsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalSecretsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OAuthPublishedAppsAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OAuthPublishedAppsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OAuthPublishedAppsService.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OidcFederationPolicy.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/PublishedAppIntegrationAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/PublishedAppIntegrationImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/PublishedAppIntegrationService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/PublishedAppOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/SecretInfo.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyImpl.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalSecretsAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalSecretsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalSecretsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/TokenAccessPolicy.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateAccountFederationPolicyRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateCustomAppIntegration.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateCustomAppIntegrationOutput.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdatePublishedAppIntegration.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdatePublishedAppIntegrationOutput.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateServicePrincipalFederationPolicyRequest.java 
linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipelineResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CronTrigger.java linguist-generated=true diff --git a/CHANGELOG.md b/CHANGELOG.md index 0781ce290..634a9d1ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Version changelog +## [Release] Release v0.38.0 + +### API Changes: + + * Added `accountClient.federationPolicy()` service and `accountClient.servicePrincipalFederationPolicy()` service. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterAttributes`. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterDetails`. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.ClusterSpec`. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.CreateCluster`. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.EditCluster`. + * Added `isSingleNode`, `kind` and `useMlRuntime` fields for `com.databricks.sdk.service.compute.UpdateClusterResource`. + * Added `updateParameterSyntax` field for `com.databricks.sdk.service.dashboards.MigrateDashboardRequest`. + * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.RunTask`. + * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.SubmitTask`. + * Added `cleanRoomsNotebookTask` field for `com.databricks.sdk.service.jobs.Task`. + * Changed `daysOfWeek` field for `com.databricks.sdk.service.pipelines.RestartWindow` to type `com.databricks.sdk.service.pipelines.RestartWindowDaysOfWeekList` class. 
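For consumers of the `daysOfWeek` change above, a migration sketch. This assumes the generated field is now a collection of the pre-existing `RestartWindowDaysOfWeek` enum and that the other `RestartWindow` setters (such as `setStartHour`) are unchanged; verify against the generated class before relying on it:

```java
import com.databricks.sdk.service.pipelines.RestartWindow;
import com.databricks.sdk.service.pipelines.RestartWindowDaysOfWeek;
import java.util.Arrays;

public class RestartWindowMigration {
  public static void main(String[] args) {
    // Before v0.38.0 the setter took a single enum value:
    // new RestartWindow().setDaysOfWeek(RestartWindowDaysOfWeek.MONDAY);

    // From v0.38.0, pass a collection instead (sketch; check the generated signature):
    RestartWindow window =
        new RestartWindow()
            .setStartHour(2L) // assumed existing field; window opens at 02:00
            .setDaysOfWeek(
                Arrays.asList(RestartWindowDaysOfWeek.MONDAY, RestartWindowDaysOfWeek.FRIDAY));
    System.out.println(window);
  }
}
```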
+ +OpenAPI SHA: a6a317df8327c9b1e5cb59a03a42ffa2aabeef6d, Date: 2024-12-16 + + ## [Release] Release v0.37.0 ### Internal Changes diff --git a/databricks-sdk-java/pom.xml b/databricks-sdk-java/pom.xml index 5b03fb3f0..0fd6a5edf 100644 --- a/databricks-sdk-java/pom.xml +++ b/databricks-sdk-java/pom.xml @@ -5,7 +5,7 @@ com.databricks databricks-sdk-parent - 0.37.0 + 0.38.0 databricks-sdk-java diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java index 5df917515..2176b3c8c 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java @@ -30,12 +30,16 @@ import com.databricks.sdk.service.iam.AccountUsersService; import com.databricks.sdk.service.iam.WorkspaceAssignmentAPI; import com.databricks.sdk.service.iam.WorkspaceAssignmentService; +import com.databricks.sdk.service.oauth2.AccountFederationPolicyAPI; +import com.databricks.sdk.service.oauth2.AccountFederationPolicyService; import com.databricks.sdk.service.oauth2.CustomAppIntegrationAPI; import com.databricks.sdk.service.oauth2.CustomAppIntegrationService; import com.databricks.sdk.service.oauth2.OAuthPublishedAppsAPI; import com.databricks.sdk.service.oauth2.OAuthPublishedAppsService; import com.databricks.sdk.service.oauth2.PublishedAppIntegrationAPI; import com.databricks.sdk.service.oauth2.PublishedAppIntegrationService; +import com.databricks.sdk.service.oauth2.ServicePrincipalFederationPolicyAPI; +import com.databricks.sdk.service.oauth2.ServicePrincipalFederationPolicyService; import com.databricks.sdk.service.oauth2.ServicePrincipalSecretsAPI; import com.databricks.sdk.service.oauth2.ServicePrincipalSecretsService; import com.databricks.sdk.service.provisioning.*; @@ -72,6 +76,7 @@ public class AccountClient { private CredentialsAPI credentialsAPI; private CustomAppIntegrationAPI customAppIntegrationAPI; private EncryptionKeysAPI encryptionKeysAPI; + private AccountFederationPolicyAPI federationPolicyAPI; private AccountGroupsAPI groupsAPI; private AccountIpAccessListsAPI ipAccessListsAPI; private LogDeliveryAPI logDeliveryAPI; @@ -82,6 +87,7 @@ public class AccountClient { private OAuthPublishedAppsAPI oAuthPublishedAppsAPI; private PrivateAccessAPI privateAccessAPI; private PublishedAppIntegrationAPI publishedAppIntegrationAPI; + private ServicePrincipalFederationPolicyAPI servicePrincipalFederationPolicyAPI; private ServicePrincipalSecretsAPI servicePrincipalSecretsAPI; private AccountServicePrincipalsAPI servicePrincipalsAPI; private AccountSettingsAPI settingsAPI; @@ -107,6 +113,7 @@ public AccountClient(DatabricksConfig config) { credentialsAPI = new CredentialsAPI(apiClient); customAppIntegrationAPI = new CustomAppIntegrationAPI(apiClient); encryptionKeysAPI = new EncryptionKeysAPI(apiClient); + federationPolicyAPI = new AccountFederationPolicyAPI(apiClient); groupsAPI = new AccountGroupsAPI(apiClient); ipAccessListsAPI = new AccountIpAccessListsAPI(apiClient); logDeliveryAPI = new LogDeliveryAPI(apiClient); @@ -117,6 +124,7 @@ public AccountClient(DatabricksConfig config) { oAuthPublishedAppsAPI = new OAuthPublishedAppsAPI(apiClient); privateAccessAPI = new PrivateAccessAPI(apiClient); publishedAppIntegrationAPI = new PublishedAppIntegrationAPI(apiClient); + servicePrincipalFederationPolicyAPI = new ServicePrincipalFederationPolicyAPI(apiClient); servicePrincipalSecretsAPI = new ServicePrincipalSecretsAPI(apiClient); 
servicePrincipalsAPI = new AccountServicePrincipalsAPI(apiClient); settingsAPI = new AccountSettingsAPI(apiClient); @@ -191,6 +199,55 @@ public EncryptionKeysAPI encryptionKeys() { return encryptionKeysAPI; } + /** + * These APIs manage account federation policies. + * + *

Account federation policies allow users and service principals in your Databricks account to + * securely access Databricks APIs using tokens from your trusted identity providers (IdPs). + * + *

With token federation, your users and service principals can exchange tokens from your IdP + * for Databricks OAuth tokens, which can be used to access Databricks APIs. Token federation + * eliminates the need to manage Databricks secrets, and allows you to centralize management of + * token issuance policies in your IdP. Databricks token federation is typically used in + * combination with [SCIM], so users in your IdP are synchronized into your Databricks account. + * + *

Token federation is configured in your Databricks account using an account federation + * policy. An account federation policy specifies: * which IdP, or issuer, your Databricks account + * should accept tokens from * how to determine which Databricks user, or subject, a token is + * issued for + * + *

To configure a federation policy, you provide the following: * The required token + * __issuer__, as specified in the “iss” claim of your tokens. The issuer is an HTTPS URL that + * identifies your IdP. * The allowed token __audiences__, as specified in the “aud” claim of your + * tokens. This identifier is intended to represent the recipient of the token. As long as the + * audience in the token matches at least one audience in the policy, the token is considered a + * match. If unspecified, the default value is your Databricks account id. * The __subject + * claim__, which indicates which token claim contains the Databricks username of the user the + * token was issued for. If unspecified, the default value is “sub”. * Optionally, the public keys + * used to validate the signature of your tokens, in JWKS format. If unspecified (recommended), + * Databricks automatically fetches the public keys from your issuer’s well-known endpoint. + * Databricks strongly recommends relying on your issuer’s well-known endpoint for discovering + * public keys. + + *

An example federation policy is: ``` issuer: "https://idp.mycompany.com/oidc" audiences: + * ["databricks"] subject_claim: "sub" ``` + * + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks as user `username@mycompany.com` is: ``` { "iss": "https://idp.mycompany.com/oidc", + * "aud": "databricks", "sub": "username@mycompany.com" } ``` + * + *
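As an illustration only, the policy above might be created through the new `federationPolicy()` service roughly as follows. The request and model classes (`CreateAccountFederationPolicyRequest`, `FederationPolicy`, `OidcFederationPolicy`) are the ones added in this patch, but the setter and method names (`setOidcPolicy`, `setIssuer`, `setAudiences`, `setSubjectClaim`, `create`) are assumptions based on the SDK's usual generated-builder pattern:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.oauth2.CreateAccountFederationPolicyRequest;
import com.databricks.sdk.service.oauth2.FederationPolicy;
import com.databricks.sdk.service.oauth2.OidcFederationPolicy;
import java.util.Arrays;

public class AccountFederationPolicyExample {
  public static void main(String[] args) {
    // Resolves account-level credentials from the environment.
    AccountClient account = new AccountClient();

    // Mirrors the example policy in the javadoc above; setter names are assumed.
    FederationPolicy policy =
        new FederationPolicy()
            .setOidcPolicy(
                new OidcFederationPolicy()
                    .setIssuer("https://idp.mycompany.com/oidc")
                    .setAudiences(Arrays.asList("databricks"))
                    .setSubjectClaim("sub"));

    account
        .federationPolicy()
        .create(new CreateAccountFederationPolicyRequest().setPolicy(policy));
  }
}
```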

You may also need to configure your IdP to generate tokens for your users to exchange with + * Databricks, if your users do not already have the ability to generate tokens that are + * compatible with your federation policy. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + * + *

[SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html + */ + public AccountFederationPolicyAPI federationPolicy() { + return federationPolicyAPI; + } + /** * Groups simplify identity management, making it easier to assign access to Databricks account, * data, and other securable objects. @@ -342,6 +399,55 @@ public PublishedAppIntegrationAPI publishedAppIntegration() { return publishedAppIntegrationAPI; } + /** + * These APIs manage service principal federation policies. + * + *

Service principal federation, also known as Workload Identity Federation, allows your + * automated workloads running outside of Databricks to securely access Databricks APIs without + * the need for Databricks secrets. With Workload Identity Federation, your application (or + * workload) authenticates to Databricks as a Databricks service principal, using tokens provided + * by the workload runtime. + * + *

Databricks strongly recommends using Workload Identity Federation to authenticate to + * Databricks from automated workloads, over alternatives such as OAuth client secrets or Personal + * Access Tokens, whenever possible. Workload Identity Federation is supported by many popular + * services, including GitHub Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes + * clusters, among others. + + *

Workload identity federation is configured in your Databricks account using a service + * principal federation policy. A service principal federation policy specifies: * which IdP, or + * issuer, the service principal is allowed to authenticate from * which workload identity, or + * subject, is allowed to authenticate as the Databricks service principal + * + *

To configure a federation policy, you provide the following: * The required token + * __issuer__, as specified in the “iss” claim of workload identity tokens. The issuer is an HTTPS + * URL that identifies the workload identity provider. * The required token __subject__, as + * specified in the “sub” claim of workload identity tokens. The subject uniquely identifies the + * workload in the workload runtime environment. * The allowed token __audiences__, as specified + * in the “aud” claim of workload identity tokens. The audience is intended to represent the + * recipient of the token. As long as the audience in the token matches at least one audience in + * the policy, the token is considered a match. If unspecified, the default value is your + * Databricks account id. * Optionally, the public keys used to validate the signature of the + * workload identity tokens, in JWKS format. If unspecified (recommended), Databricks + * automatically fetches the public keys from the issuer’s well-known endpoint. Databricks + * strongly recommends relying on the issuer’s well-known endpoint for discovering public keys. + + *

An example service principal federation policy for a GitHub Actions workload is: ``` + * issuer: "https://token.actions.githubusercontent.com" audiences: + * ["https://github.com/my-github-org"] subject: "repo:my-github-org/my-repo:environment:prod" ``` + + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks is: ``` { "iss": "https://token.actions.githubusercontent.com", "aud": + * "https://github.com/my-github-org", "sub": "repo:my-github-org/my-repo:environment:prod" } ``` + * + *
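For illustration, a sketch of configuring the GitHub Actions policy above through the new `servicePrincipalFederationPolicy()` service. The classes are the ones added in this patch, but the method and setter names, and the `setServicePrincipalId` field in particular, are assumptions based on the SDK's generated-builder conventions:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.oauth2.CreateServicePrincipalFederationPolicyRequest;
import com.databricks.sdk.service.oauth2.FederationPolicy;
import com.databricks.sdk.service.oauth2.OidcFederationPolicy;
import java.util.Arrays;

public class WorkloadIdentityPolicyExample {
  public static void main(String[] args) {
    AccountClient account = new AccountClient();

    // Mirrors the GitHub Actions policy in the javadoc above; names are assumed.
    FederationPolicy policy =
        new FederationPolicy()
            .setOidcPolicy(
                new OidcFederationPolicy()
                    .setIssuer("https://token.actions.githubusercontent.com")
                    .setAudiences(Arrays.asList("https://github.com/my-github-org"))
                    .setSubject("repo:my-github-org/my-repo:environment:prod"));

    account
        .servicePrincipalFederationPolicy()
        .create(
            new CreateServicePrincipalFederationPolicyRequest()
                .setServicePrincipalId(123L) // hypothetical numeric service principal ID
                .setPolicy(policy));
  }
}
```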

You may also need to configure the workload runtime to generate tokens for your workloads. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + */ + public ServicePrincipalFederationPolicyAPI servicePrincipalFederationPolicy() { + return servicePrincipalFederationPolicyAPI; + } + /** * These APIs enable administrators to manage service principal secrets. * @@ -509,6 +615,18 @@ public AccountClient withEncryptionKeysAPI(EncryptionKeysAPI encryptionKeys) { return this; } + /** Replace the default AccountFederationPolicyService with a custom implementation. */ + public AccountClient withFederationPolicyImpl( + AccountFederationPolicyService accountFederationPolicy) { + return this.withFederationPolicyAPI(new AccountFederationPolicyAPI(accountFederationPolicy)); + } + + /** Replace the default AccountFederationPolicyAPI with a custom implementation. */ + public AccountClient withFederationPolicyAPI(AccountFederationPolicyAPI accountFederationPolicy) { + this.federationPolicyAPI = accountFederationPolicy; + return this; + } + /** Replace the default AccountGroupsService with a custom implementation. */ public AccountClient withGroupsImpl(AccountGroupsService accountGroups) { return this.withGroupsAPI(new AccountGroupsAPI(accountGroups)); @@ -625,6 +743,20 @@ public AccountClient withPublishedAppIntegrationAPI( return this; } + /** Replace the default ServicePrincipalFederationPolicyService with a custom implementation. */ + public AccountClient withServicePrincipalFederationPolicyImpl( + ServicePrincipalFederationPolicyService servicePrincipalFederationPolicy) { + return this.withServicePrincipalFederationPolicyAPI( + new ServicePrincipalFederationPolicyAPI(servicePrincipalFederationPolicy)); + } + + /** Replace the default ServicePrincipalFederationPolicyAPI with a custom implementation. */ + public AccountClient withServicePrincipalFederationPolicyAPI( + ServicePrincipalFederationPolicyAPI servicePrincipalFederationPolicy) { + this.servicePrincipalFederationPolicyAPI = servicePrincipalFederationPolicy; + return this; + } + /** Replace the default ServicePrincipalSecretsService with a custom implementation. 
*/ public AccountClient withServicePrincipalSecretsImpl( ServicePrincipalSecretsService servicePrincipalSecrets) { diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetBindingsSecurableType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetBindingsSecurableType.java index a90291dd0..d35c64de5 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetBindingsSecurableType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetBindingsSecurableType.java @@ -10,12 +10,12 @@ public enum GetBindingsSecurableType { @JsonProperty("catalog") CATALOG, + @JsonProperty("credential") + CREDENTIAL, + @JsonProperty("external_location") EXTERNAL_LOCATION, - @JsonProperty("service_credential") - SERVICE_CREDENTIAL, - @JsonProperty("storage_credential") STORAGE_CREDENTIAL, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateBindingsSecurableType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateBindingsSecurableType.java index ab1a503f4..412d8d69b 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateBindingsSecurableType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateBindingsSecurableType.java @@ -10,12 +10,12 @@ public enum UpdateBindingsSecurableType { @JsonProperty("catalog") CATALOG, + @JsonProperty("credential") + CREDENTIAL, + @JsonProperty("external_location") EXTERNAL_LOCATION, - @JsonProperty("service_credential") - SERVICE_CREDENTIAL, - @JsonProperty("storage_credential") STORAGE_CREDENTIAL, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java index 3787fbb8e..586817c27 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java @@ -67,13 +67,19 @@ public class ClusterAttributes { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -136,6 +142,26 @@ public class ClusterAttributes { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single node related `custom_tags`, + * `spark_conf`, and `num_workers` + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** * This field encodes, through a single value, the resources available to each of the Spark nodes * in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -204,6 +230,15 @@ public class ClusterAttributes { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -343,6 +378,24 @@ public String getInstancePoolId() { return instancePoolId; } + public ClusterAttributes setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + + public ClusterAttributes setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public ClusterAttributes setNodeTypeId(String nodeTypeId) { this.nodeTypeId = nodeTypeId; return this; @@ -415,6 +468,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterAttributes setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public ClusterAttributes setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -444,6 +506,8 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) + && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(policyId, that.policyId) && Objects.equals(runtimeEngine, that.runtimeEngine) @@ -452,6 +516,7 @@ public boolean equals(Object o) { && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -473,6 +538,8 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, + kind, nodeTypeId, policyId, runtimeEngine, @@ -481,6 +548,7 @@ public int hashCode() { sparkEnvVars, sparkVersion, sshPublicKeys, + useMlRuntime, workloadType); } @@ -502,6 +570,8 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) + .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("policyId", policyId) .add("runtimeEngine", runtimeEngine) @@ -510,6 +580,7 @@ public String toString() { .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java index ab02e71d0..2a182f0c9 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java @@ -111,13 +111,19 @@ public class ClusterDetails { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -207,6 +213,15 @@ public class ClusterDetails { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single node related `custom_tags`, + * `spark_conf`, and `num_workers` + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + /** * Port on which Spark JDBC server is listening, in the driver node. No service will be listening * on this port in executor nodes. */ @JsonProperty("jdbc_port") private Long jdbcPort; + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** the timestamp that the cluster was started/restarted */ @JsonProperty("last_restarted_time") private Long lastRestartedTime; @@ -349,6 +375,15 @@ public class ClusterDetails { @JsonProperty("termination_reason") private TerminationReason terminationReason; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -578,6 +613,15 @@ public String getInstancePoolId() { return instancePoolId; } + public ClusterDetails setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + public ClusterDetails setJdbcPort(Long jdbcPort) { this.jdbcPort = jdbcPort; return this; @@ -587,6 +631,15 @@ public Long getJdbcPort() { return jdbcPort; } + public ClusterDetails setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public ClusterDetails setLastRestartedTime(Long lastRestartedTime) { this.lastRestartedTime = lastRestartedTime; return this; @@ -749,6 +802,15 @@ public TerminationReason getTerminationReason() { return terminationReason; } + public ClusterDetails setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public ClusterDetails setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -788,7 +850,9 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) && Objects.equals(jdbcPort, that.jdbcPort) + && Objects.equals(kind, that.kind) && Objects.equals(lastRestartedTime, that.lastRestartedTime) && Objects.equals(lastStateLossTime, that.lastStateLossTime) && Objects.equals(nodeTypeId, that.nodeTypeId) @@ -807,6 +871,7 @@ public boolean equals(Object o) { && Objects.equals(stateMessage, that.stateMessage) && Objects.equals(terminatedTime, that.terminatedTime) && Objects.equals(terminationReason, that.terminationReason) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -838,7 +903,9 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, jdbcPort, + kind, lastRestartedTime, lastStateLossTime, nodeTypeId, @@ -857,6 +924,7 @@ public int hashCode() { stateMessage, terminatedTime, terminationReason, + useMlRuntime, workloadType); } @@ -888,7 +956,9 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) .add("jdbcPort", jdbcPort) + .add("kind", kind) .add("lastRestartedTime", lastRestartedTime) .add("lastStateLossTime", lastStateLossTime) .add("nodeTypeId", nodeTypeId) @@ -907,6 +977,7 @@ public String toString() { .add("stateMessage", stateMessage) .add("terminatedTime", terminatedTime) .add("terminationReason", terminationReason) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java index f5d7f925d..d0ebea4b3 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java @@ -81,13 
+81,19 @@ public class ClusterSpec { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -150,6 +156,26 @@ public class ClusterSpec { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single node related `custom_tags`, + * `spark_conf`, and `num_workers` + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** * This field encodes, through a single value, the resources available to each of the Spark nodes * in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -231,6 +257,15 @@ public class ClusterSpec { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -388,6 +423,24 @@ public String getInstancePoolId() { return instancePoolId; } + public ClusterSpec setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + + public ClusterSpec setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public ClusterSpec setNodeTypeId(String nodeTypeId) { this.nodeTypeId = nodeTypeId; return this; @@ -469,6 +522,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterSpec setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public ClusterSpec setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -500,6 +562,8 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) + && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) @@ -509,6 +573,7 @@ public boolean equals(Object o) { && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -532,6 +597,8 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, + kind, nodeTypeId, numWorkers, policyId, @@ -541,6 +608,7 @@ public int hashCode() { sparkEnvVars, sparkVersion, sshPublicKeys, + useMlRuntime, workloadType); } @@ -564,6 +632,8 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) + .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) @@ -573,6 +643,7 @@ public String toString() { .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java index 469137cb7..4ee21def2 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java @@ -88,13 +88,19 @@ public class CreateCluster { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -157,6 +163,26 @@ public class CreateCluster { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single node related `custom_tags`, + * `spark_conf`, and `num_workers` + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *
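A sketch of how the new fields might be combined on a create request. The `setKind`, `setIsSingleNode`, and `setUseMlRuntime` setters are the ones added in this patch, and the docs below reference a `CLASSIC_PREVIEW` constant that the new `Kind` enum is assumed to expose; the Spark version and node type values are placeholders (in practice, discover them via the Clusters API):

```java
import com.databricks.sdk.service.compute.CreateCluster;
import com.databricks.sdk.service.compute.DataSecurityMode;
import com.databricks.sdk.service.compute.Kind;

public class SimpleFormClusterSpec {
  public static void main(String[] args) {
    CreateCluster spec =
        new CreateCluster()
            .setClusterName("simple-form-demo")
            .setSparkVersion("15.4.x-scala2.12") // placeholder DBR label
            .setNodeTypeId("i3.xlarge") // placeholder node type
            .setKind(Kind.CLASSIC_PREVIEW) // opts into the new validations and defaults
            .setIsSingleNode(true) // Databricks fills in single-node tags, conf, num_workers
            .setUseMlRuntime(true) // ML runtime variant of the chosen spark_version
            .setDataSecurityMode(DataSecurityMode.DATA_SECURITY_MODE_AUTO); // requires `kind`
    System.out.println(spec);
  }
}
```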

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** * This field encodes, through a single value, the resources available to each of the Spark nodes * in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -238,6 +264,15 @@ public class CreateCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -404,6 +439,24 @@ public String getInstancePoolId() { return instancePoolId; } + public CreateCluster setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + + public CreateCluster setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public CreateCluster setNodeTypeId(String nodeTypeId) { this.nodeTypeId = nodeTypeId; return this; @@ -485,6 +538,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public CreateCluster setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public CreateCluster setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -517,6 +579,8 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) + && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) @@ -526,6 +590,7 @@ public boolean equals(Object o) { && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -550,6 +615,8 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, + kind, nodeTypeId, numWorkers, policyId, @@ -559,6 +626,7 @@ public int hashCode() { sparkEnvVars, sparkVersion, sshPublicKeys, + useMlRuntime, workloadType); } @@ -583,6 +651,8 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) + .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) @@ -592,6 +662,7 @@ public String toString() { .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/DataSecurityMode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/DataSecurityMode.java index 9c905f177..5de06979d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/DataSecurityMode.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/DataSecurityMode.java @@ -7,13 +7,19 @@ /** * Data security mode decides what data governance model to use when accessing data from a cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported in - * this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks will + * choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *
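Taken together with the new `is_single_node`, `kind`, and `use_ml_runtime` fields, the kind-gated access modes compose as in the following minimal sketch. It assumes a configured `WorkspaceClient`; the cluster name, node type, and Spark version are placeholders, and the blocking `.get()` on the returned waiter follows the SDK's usual pattern.

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.compute.CreateCluster;
import com.databricks.sdk.service.compute.DataSecurityMode;
import com.databricks.sdk.service.compute.Kind;

public class SimpleFormClusterExample {
  public static void main(String[] args) throws Exception {
    WorkspaceClient w = new WorkspaceClient();
    // Opting into kind-based compute unlocks the gated fields below.
    w.clusters()
        .create(
            new CreateCluster()
                .setClusterName("simple-form-demo") // placeholder
                .setSparkVersion("15.4.x-scala2.12") // placeholder DBR release
                .setNodeTypeId("i3.xlarge") // placeholder node type
                .setKind(Kind.CLASSIC_PREVIEW)
                .setIsSingleNode(true) // Databricks fills in single-node custom_tags, spark_conf, num_workers
                .setDataSecurityMode(DataSecurityMode.DATA_SECURITY_MODE_AUTO)) // kind-gated mode
        .get(); // block until the cluster reaches a running state
  }
}
```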

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features are + * available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming languages + * and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -26,6 +32,11 @@ */ @Generated public enum DataSecurityMode { + DATA_SECURITY_MODE_AUTO, // will choose the most appropriate access mode depending on + // your + // compute configuration. + DATA_SECURITY_MODE_DEDICATED, // Alias for `SINGLE_USER`. + DATA_SECURITY_MODE_STANDARD, // Alias for `USER_ISOLATION`. LEGACY_PASSTHROUGH, // This mode is for users migrating from legacy Passthrough on high // concurrency // clusters. diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java index 794e1f595..33cd6edda 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java @@ -85,13 +85,19 @@ public class EditCluster { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -154,6 +160,26 @@ public class EditCluster { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single-node-related `custom_tags`, + * `spark_conf`, and `num_workers`. + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** * This field encodes, through a single value, the resources available to each of the Spark nodes * in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -235,6 +261,15 @@ public class EditCluster { @JsonProperty("ssh_public_keys") private Collection<String> sshPublicKeys; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -401,6 +436,24 @@ public String getInstancePoolId() { return instancePoolId; } + public EditCluster setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + + public EditCluster setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public EditCluster setNodeTypeId(String nodeTypeId) { this.nodeTypeId = nodeTypeId; return this; @@ -482,6 +535,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public EditCluster setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public EditCluster setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -514,6 +576,8 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) + && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) @@ -523,6 +587,7 @@ public boolean equals(Object o) { && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -547,6 +612,8 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, + kind, nodeTypeId, numWorkers, policyId, @@ -556,6 +623,7 @@ public int hashCode() { sparkEnvVars, sparkVersion, sshPublicKeys, + useMlRuntime, workloadType); } @@ -580,6 +648,8 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) + .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) @@ -589,6 +659,7 @@ public String toString() { .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Kind.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Kind.java new file mode 100755 index 000000000..11c0ce083 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Kind.java @@ -0,0 +1,18 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.compute; + +import com.databricks.sdk.support.Generated; + +/** + * The kind of compute described by this compute specification. + * + *
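The same fields apply when reshaping an existing cluster; a sketch, assuming a configured `WorkspaceClient` `w` and a known target cluster id (`edit` restarts the cluster):

```java
String clusterId = "1234-567890-abcde123"; // placeholder
w.clusters()
    .edit(
        new EditCluster()
            .setClusterId(clusterId)
            .setSparkVersion("15.4.x-scala2.12") // placeholder DBR release
            .setNodeTypeId("g5.xlarge") // placeholder GPU node type
            .setKind(Kind.CLASSIC_PREVIEW)
            .setIsSingleNode(true)
            .setUseMlRuntime(true)) // effective_spark_version switches to the (GPU) ML runtime
    .get(); // block until the edited cluster is running again
```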

Depending on `kind`, different validations and default values will be applied. + * + *

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ +@Generated +public enum Kind { + CLASSIC_PREVIEW, +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java index 8f9d82019..30abd3905 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java @@ -74,13 +74,19 @@ public class UpdateClusterResource { * Data security mode decides what data governance model to use when accessing data from a * cluster. * - *

* `NONE`: No security isolation for multiple users sharing the cluster. Data governance - * features are not available in this mode. * `SINGLE_USER`: A secure cluster that can only be - * exclusively used by a single user specified in `single_user_name`. Most programming languages, - * cluster features and data governance features are available in this mode. * `USER_ISOLATION`: A - * secure cluster that can be shared by multiple users. Cluster users are fully isolated so that - * they cannot see each other's data and credentials. Most data governance features are supported - * in this mode. But programming languages and cluster features might be limited. + *

The following modes can only be used with `kind`. * `DATA_SECURITY_MODE_AUTO`: Databricks + * will choose the most appropriate access mode depending on your compute configuration. * + * `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * `DATA_SECURITY_MODE_DEDICATED`: + * Alias for `SINGLE_USER`. + * + *

The following modes can be used regardless of `kind`. * `NONE`: No security isolation for + * multiple users sharing the cluster. Data governance features are not available in this mode. * + * `SINGLE_USER`: A secure cluster that can only be exclusively used by a single user specified in + * `single_user_name`. Most programming languages, cluster features and data governance features + * are available in this mode. * `USER_ISOLATION`: A secure cluster that can be shared by multiple + * users. Cluster users are fully isolated so that they cannot see each other's data and + * credentials. Most data governance features are supported in this mode. But programming + * languages and cluster features might be limited. * *

The following modes are deprecated starting with Databricks Runtime 15.0 and will be removed * for future Databricks Runtime versions: @@ -143,6 +149,26 @@ public class UpdateClusterResource { @JsonProperty("instance_pool_id") private String instancePoolId; + /** + * This field can only be used with `kind`. + * + *

When set to true, Databricks will automatically set single-node-related `custom_tags`, + * `spark_conf`, and `num_workers`. + */ + @JsonProperty("is_single_node") + private Boolean isSingleNode; + + /** + * The kind of compute described by this compute specification. + * + *

Depending on `kind`, different validations and default values will be applied. + * + *
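For partial updates the same fields travel inside `UpdateClusterResource` together with an update mask. A sketch, assuming the existing `UpdateCluster` wrapper in this package and a configured `WorkspaceClient` `w`; method and field names should be checked against the generated sources:

```java
w.clusters()
    .update(
        new UpdateCluster()
            .setClusterId("1234-567890-abcde123") // placeholder
            .setUpdateMask("kind,is_single_node,use_ml_runtime") // only these fields are touched
            .setCluster(
                new UpdateClusterResource()
                    .setKind(Kind.CLASSIC_PREVIEW)
                    .setIsSingleNode(true)
                    .setUseMlRuntime(true)))
    .get();
```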

The first usage of this value is for the simple cluster form where it sets `kind = + * CLASSIC_PREVIEW`. + */ + @JsonProperty("kind") + private Kind kind; + /** * This field encodes, through a single value, the resources available to each of the Spark nodes * in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -224,6 +250,15 @@ public class UpdateClusterResource { @JsonProperty("ssh_public_keys") private Collection<String> sshPublicKeys; + /** + * This field can only be used with `kind`. + * + *

`effective_spark_version` is determined by `spark_version` (DBR release), this field + * `use_ml_runtime`, and whether `node_type_id` is gpu node or not. + */ + @JsonProperty("use_ml_runtime") + private Boolean useMlRuntime; + /** */ @JsonProperty("workload_type") private WorkloadType workloadType; @@ -372,6 +407,24 @@ public String getInstancePoolId() { return instancePoolId; } + public UpdateClusterResource setIsSingleNode(Boolean isSingleNode) { + this.isSingleNode = isSingleNode; + return this; + } + + public Boolean getIsSingleNode() { + return isSingleNode; + } + + public UpdateClusterResource setKind(Kind kind) { + this.kind = kind; + return this; + } + + public Kind getKind() { + return kind; + } + public UpdateClusterResource setNodeTypeId(String nodeTypeId) { this.nodeTypeId = nodeTypeId; return this; @@ -453,6 +506,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public UpdateClusterResource setUseMlRuntime(Boolean useMlRuntime) { + this.useMlRuntime = useMlRuntime; + return this; + } + + public Boolean getUseMlRuntime() { + return useMlRuntime; + } + public UpdateClusterResource setWorkloadType(WorkloadType workloadType) { this.workloadType = workloadType; return this; @@ -483,6 +545,8 @@ public boolean equals(Object o) { && Objects.equals(gcpAttributes, that.gcpAttributes) && Objects.equals(initScripts, that.initScripts) && Objects.equals(instancePoolId, that.instancePoolId) + && Objects.equals(isSingleNode, that.isSingleNode) + && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) @@ -492,6 +556,7 @@ public boolean equals(Object o) { && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -514,6 +579,8 @@ public int hashCode() { gcpAttributes, initScripts, instancePoolId, + isSingleNode, + kind, nodeTypeId, numWorkers, policyId, @@ -523,6 +590,7 @@ public int hashCode() { sparkEnvVars, sparkVersion, sshPublicKeys, + useMlRuntime, workloadType); } @@ -545,6 +613,8 @@ public String toString() { .add("gcpAttributes", gcpAttributes) .add("initScripts", initScripts) .add("instancePoolId", instancePoolId) + .add("isSingleNode", isSingleNode) + .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) @@ -554,6 +624,7 @@ public String toString() { .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java index 360c202ec..674a9f461 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java @@ -21,6 +21,13 @@ public class MigrateDashboardRequest { @JsonProperty("source_dashboard_id") private String sourceDashboardId; + /** + * Flag to indicate if mustache parameter syntax ({{ param }}) should be auto-updated to named + * syntax (:param) when converting datasets in the dashboard. 
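A sketch of the flag in use via the Lakeview `migrate` call that this request feeds, assuming a configured `WorkspaceClient` `w`; ids and paths are placeholders:

```java
Dashboard migrated =
    w.lakeview()
        .migrate(
            new MigrateDashboardRequest()
                .setSourceDashboardId("abcdef1234567890") // placeholder legacy dashboard id
                .setDisplayName("Sales (migrated)")
                .setParentPath("/Workspace/Shared/dashboards") // placeholder
                .setUpdateParameterSyntax(true)); // rewrite {{ param }} to :param in converted datasets
```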
+ */ + @JsonProperty("update_parameter_syntax") + private Boolean updateParameterSyntax; + public MigrateDashboardRequest setDisplayName(String displayName) { this.displayName = displayName; return this; @@ -48,6 +55,15 @@ public String getSourceDashboardId() { return sourceDashboardId; } + public MigrateDashboardRequest setUpdateParameterSyntax(Boolean updateParameterSyntax) { + this.updateParameterSyntax = updateParameterSyntax; + return this; + } + + public Boolean getUpdateParameterSyntax() { + return updateParameterSyntax; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -55,12 +71,13 @@ public boolean equals(Object o) { MigrateDashboardRequest that = (MigrateDashboardRequest) o; return Objects.equals(displayName, that.displayName) && Objects.equals(parentPath, that.parentPath) - && Objects.equals(sourceDashboardId, that.sourceDashboardId); + && Objects.equals(sourceDashboardId, that.sourceDashboardId) + && Objects.equals(updateParameterSyntax, that.updateParameterSyntax); } @Override public int hashCode() { - return Objects.hash(displayName, parentPath, sourceDashboardId); + return Objects.hash(displayName, parentPath, sourceDashboardId, updateParameterSyntax); } @Override @@ -69,6 +86,7 @@ public String toString() { .add("displayName", displayName) .add("parentPath", parentPath) .add("sourceDashboardId", sourceDashboardId) + .add("updateParameterSyntax", updateParameterSyntax) .toString(); } } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/BaseRun.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/BaseRun.java index 1b3a99c1b..14ff9aded 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/BaseRun.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/BaseRun.java @@ -202,7 +202,8 @@ public class BaseRun { * previously failed run. This occurs when you request to re-run the job in case of failures. * * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`: * Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is - * triggered by a table update. + * triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to + * manually restart a continuous job run. */ @JsonProperty("trigger") private TriggerType trigger; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java index 704f0a77f..5369e357c 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java @@ -7,7 +7,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; -/** Stores the run state of the clean room notebook V1 task. */ +/** Stores the run state of the clean rooms notebook task. */ @Generated public class CleanRoomTaskRunState { /** diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomsNotebookTask.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomsNotebookTask.java new file mode 100755 index 000000000..2c53eebed --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomsNotebookTask.java @@ -0,0 +1,94 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.jobs; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Map; +import java.util.Objects; + +@Generated +public class CleanRoomsNotebookTask { + /** The clean room that the notebook belongs to. */ + @JsonProperty("clean_room_name") + private String cleanRoomName; + + /** + * Checksum to validate the freshness of the notebook resource (i.e. the notebook being run is the + * latest version). It can be fetched by calling the :method:cleanroomassets/get API. + */ + @JsonProperty("etag") + private String etag; + + /** Base parameters to be used for the clean room notebook job. */ + @JsonProperty("notebook_base_parameters") + private Map notebookBaseParameters; + + /** Name of the notebook being run. */ + @JsonProperty("notebook_name") + private String notebookName; + + public CleanRoomsNotebookTask setCleanRoomName(String cleanRoomName) { + this.cleanRoomName = cleanRoomName; + return this; + } + + public String getCleanRoomName() { + return cleanRoomName; + } + + public CleanRoomsNotebookTask setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + public CleanRoomsNotebookTask setNotebookBaseParameters( + Map notebookBaseParameters) { + this.notebookBaseParameters = notebookBaseParameters; + return this; + } + + public Map getNotebookBaseParameters() { + return notebookBaseParameters; + } + + public CleanRoomsNotebookTask setNotebookName(String notebookName) { + this.notebookName = notebookName; + return this; + } + + public String getNotebookName() { + return notebookName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CleanRoomsNotebookTask that = (CleanRoomsNotebookTask) o; + return Objects.equals(cleanRoomName, that.cleanRoomName) + && Objects.equals(etag, that.etag) + && Objects.equals(notebookBaseParameters, that.notebookBaseParameters) + && Objects.equals(notebookName, that.notebookName); + } + + @Override + public int hashCode() { + return Objects.hash(cleanRoomName, etag, notebookBaseParameters, notebookName); + } + + @Override + public String toString() { + return new ToStringer(CleanRoomsNotebookTask.class) + .add("cleanRoomName", cleanRoomName) + .add("etag", etag) + .add("notebookBaseParameters", notebookBaseParameters) + .add("notebookName", notebookName) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthMetric.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthMetric.java index 9ae30cea2..c83a38d63 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthMetric.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthMetric.java @@ -9,26 +9,26 @@ * *

* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed across - * all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the - * maximum offset lag across all streams. This metric is in Private Preview. * + * all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the + * maximum offset lag across all streams. This metric is in Public Preview. * * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This - * metric is in Private Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of - * outstanding files across all streams. This metric is in Private Preview. + * metric is in Public Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of + * outstanding files across all streams. This metric is in Public Preview. */ @Generated public enum JobsHealthMetric { RUN_DURATION_SECONDS, // Expected total time for a run in seconds. STREAMING_BACKLOG_BYTES, // An estimate of the maximum bytes of data waiting to be consumed across // all - // streams. This metric is in Private Preview. + // streams. This metric is in Public Preview. STREAMING_BACKLOG_FILES, // An estimate of the maximum number of outstanding files across all // streams. - // This metric is in Private Preview. + // This metric is in Public Preview. STREAMING_BACKLOG_RECORDS, // An estimate of the maximum offset lag across all streams. This // metric is in - // Private Preview. + // Public Preview. STREAMING_BACKLOG_SECONDS, // An estimate of the maximum consumer delay across all streams. This // metric is - // in Private Preview. + // in Public Preview. } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthRule.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthRule.java index 406782fdd..c76e051bb 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthRule.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobsHealthRule.java @@ -14,11 +14,11 @@ public class JobsHealthRule { * *
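A sketch of a complete rule built on one of these metrics, using the existing `JobsHealthRules` container and `JobsHealthOperator` enum; the threshold is a placeholder, and the rules attach to a job through its `health` field:

```java
JobsHealthRules health =
    new JobsHealthRules()
        .setRules(
            java.util.Arrays.asList(
                new JobsHealthRule()
                    .setMetric(JobsHealthMetric.STREAMING_BACKLOG_SECONDS)
                    .setOp(JobsHealthOperator.GREATER_THAN)
                    .setValue(600L))); // alert once estimated backlog exceeds ten minutes
```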

* `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * * `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting to be consumed - * across all streams. This metric is in Private Preview. * `STREAMING_BACKLOG_RECORDS`: An - * estimate of the maximum offset lag across all streams. This metric is in Private Preview. * + * across all streams. This metric is in Public Preview. * `STREAMING_BACKLOG_RECORDS`: An + * estimate of the maximum offset lag across all streams. This metric is in Public Preview. * * `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay across all streams. This - * metric is in Private Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of - * outstanding files across all streams. This metric is in Private Preview. + * metric is in Public Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of + * outstanding files across all streams. This metric is in Public Preview. */ @JsonProperty("metric") private JobsHealthMetric metric; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Run.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Run.java index f076ba72e..d5518d321 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Run.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Run.java @@ -211,7 +211,8 @@ public class Run { * previously failed run. This occurs when you request to re-run the job in case of failures. * * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`: * Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is - * triggered by a table update. + * triggered by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to + * manually restart a continuous job run. */ @JsonProperty("trigger") private TriggerType trigger; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RunTask.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RunTask.java index 07c7e410a..8d9371cdb 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RunTask.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RunTask.java @@ -21,6 +21,14 @@ public class RunTask { @JsonProperty("attempt_number") private Long attemptNumber; + /** + * The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present. + * + *

[clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + */ + @JsonProperty("clean_rooms_notebook_task") + private CleanRoomsNotebookTask cleanRoomsNotebookTask; + /** * The time in milliseconds it took to terminate the cluster and clean up any associated * artifacts. The duration of a task run is the sum of the `setup_duration`, `execution_duration`, @@ -283,6 +291,15 @@ public Long getAttemptNumber() { return attemptNumber; } + public RunTask setCleanRoomsNotebookTask(CleanRoomsNotebookTask cleanRoomsNotebookTask) { + this.cleanRoomsNotebookTask = cleanRoomsNotebookTask; + return this; + } + + public CleanRoomsNotebookTask getCleanRoomsNotebookTask() { + return cleanRoomsNotebookTask; + } + public RunTask setCleanupDuration(Long cleanupDuration) { this.cleanupDuration = cleanupDuration; return this; @@ -631,6 +648,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; RunTask that = (RunTask) o; return Objects.equals(attemptNumber, that.attemptNumber) + && Objects.equals(cleanRoomsNotebookTask, that.cleanRoomsNotebookTask) && Objects.equals(cleanupDuration, that.cleanupDuration) && Objects.equals(clusterInstance, that.clusterInstance) && Objects.equals(conditionTask, that.conditionTask) @@ -675,6 +693,7 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash( attemptNumber, + cleanRoomsNotebookTask, cleanupDuration, clusterInstance, conditionTask, @@ -719,6 +738,7 @@ public int hashCode() { public String toString() { return new ToStringer(RunTask.class) .add("attemptNumber", attemptNumber) + .add("cleanRoomsNotebookTask", cleanRoomsNotebookTask) .add("cleanupDuration", cleanupDuration) .add("clusterInstance", clusterInstance) .add("conditionTask", conditionTask) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java index be1e79187..985f0bcfd 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java @@ -10,6 +10,14 @@ @Generated public class SubmitTask { + /** + * The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present. + * + *

[clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + */ + @JsonProperty("clean_rooms_notebook_task") + private CleanRoomsNotebookTask cleanRoomsNotebookTask; + /** * The task evaluates a condition that can be used to control the execution of other tasks when * the `condition_task` field is present. The condition task does not require a cluster to execute @@ -170,6 +178,15 @@ public class SubmitTask { @JsonProperty("webhook_notifications") private WebhookNotifications webhookNotifications; + public SubmitTask setCleanRoomsNotebookTask(CleanRoomsNotebookTask cleanRoomsNotebookTask) { + this.cleanRoomsNotebookTask = cleanRoomsNotebookTask; + return this; + } + + public CleanRoomsNotebookTask getCleanRoomsNotebookTask() { + return cleanRoomsNotebookTask; + } + public SubmitTask setConditionTask(ConditionTask conditionTask) { this.conditionTask = conditionTask; return this; @@ -391,7 +408,8 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; SubmitTask that = (SubmitTask) o; - return Objects.equals(conditionTask, that.conditionTask) + return Objects.equals(cleanRoomsNotebookTask, that.cleanRoomsNotebookTask) + && Objects.equals(conditionTask, that.conditionTask) && Objects.equals(dbtTask, that.dbtTask) && Objects.equals(dependsOn, that.dependsOn) && Objects.equals(description, that.description) @@ -420,6 +438,7 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash( + cleanRoomsNotebookTask, conditionTask, dbtTask, dependsOn, @@ -449,6 +468,7 @@ public int hashCode() { @Override public String toString() { return new ToStringer(SubmitTask.class) + .add("cleanRoomsNotebookTask", cleanRoomsNotebookTask) .add("conditionTask", conditionTask) .add("dbtTask", dbtTask) .add("dependsOn", dependsOn) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java index 011b3ee30..5805eeb5e 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java @@ -10,6 +10,14 @@ @Generated public class Task { + /** + * The task runs a [clean rooms] notebook when the `clean_rooms_notebook_task` field is present. + * + *
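A sketch of a one-time run carrying this task, assuming a configured `WorkspaceClient` `w`; clean room, notebook, and etag values are placeholders:

```java
Run run =
    w.jobs()
        .submit(
            new SubmitRun()
                .setRunName("clean-room-analysis") // placeholder
                .setTasks(
                    java.util.Arrays.asList(
                        new SubmitTask()
                            .setTaskKey("notebook_task")
                            .setCleanRoomsNotebookTask(
                                new CleanRoomsNotebookTask()
                                    .setCleanRoomName("my_clean_room") // placeholder
                                    .setNotebookName("shared_analysis") // placeholder
                                    .setEtag("a1b2c3d4") // placeholder freshness check from :method:cleanroomassets/get
                                    .setNotebookBaseParameters(
                                        java.util.Collections.singletonMap("run_date", "2024-12-16"))))))
        .get(); // block until the run reaches a terminal state
```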

[clean rooms]: https://docs.databricks.com/en/clean-rooms/index.html + */ + @JsonProperty("clean_rooms_notebook_task") + private CleanRoomsNotebookTask cleanRoomsNotebookTask; + /** * The task evaluates a condition that can be used to control the execution of other tasks when * the `condition_task` field is present. The condition task does not require a cluster to execute @@ -209,6 +217,15 @@ public class Task { @JsonProperty("webhook_notifications") private WebhookNotifications webhookNotifications; + public Task setCleanRoomsNotebookTask(CleanRoomsNotebookTask cleanRoomsNotebookTask) { + this.cleanRoomsNotebookTask = cleanRoomsNotebookTask; + return this; + } + + public CleanRoomsNotebookTask getCleanRoomsNotebookTask() { + return cleanRoomsNotebookTask; + } + public Task setConditionTask(ConditionTask conditionTask) { this.conditionTask = conditionTask; return this; @@ -475,7 +492,8 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Task that = (Task) o; - return Objects.equals(conditionTask, that.conditionTask) + return Objects.equals(cleanRoomsNotebookTask, that.cleanRoomsNotebookTask) + && Objects.equals(conditionTask, that.conditionTask) && Objects.equals(dbtTask, that.dbtTask) && Objects.equals(dependsOn, that.dependsOn) && Objects.equals(description, that.description) @@ -509,6 +527,7 @@ public boolean equals(Object o) { @Override public int hashCode() { return Objects.hash( + cleanRoomsNotebookTask, conditionTask, dbtTask, dependsOn, @@ -543,6 +562,7 @@ public int hashCode() { @Override public String toString() { return new ToStringer(Task.class) + .add("cleanRoomsNotebookTask", cleanRoomsNotebookTask) .add("conditionTask", conditionTask) .add("dbtTask", dbtTask) .add("dependsOn", dependsOn) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TriggerType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TriggerType.java index 2e71ddbf4..898287736 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TriggerType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TriggerType.java @@ -13,7 +13,8 @@ * previously failed run. This occurs when you request to re-run the job in case of failures. * * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. * `FILE_ARRIVAL`: * Indicates a run that is triggered by a file arrival. * `TABLE`: Indicates a run that is triggered - * by a table update. + * by a table update. * `CONTINUOUS_RESTART`: Indicates a run created by user to manually restart a + * continuous job run. */ @Generated public enum TriggerType { diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyAPI.java new file mode 100755 index 000000000..56185f68a --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyAPI.java @@ -0,0 +1,120 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.Paginator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * These APIs manage account federation policies. + * + *

Account federation policies allow users and service principals in your Databricks account to + * securely access Databricks APIs using tokens from your trusted identity providers (IdPs). + * + *

With token federation, your users and service principals can exchange tokens from your IdP for + * Databricks OAuth tokens, which can be used to access Databricks APIs. Token federation eliminates + * the need to manage Databricks secrets, and allows you to centralize management of token issuance + * policies in your IdP. Databricks token federation is typically used in combination with [SCIM], + * so users in your IdP are synchronized into your Databricks account. + * + *

Token federation is configured in your Databricks account using an account federation policy. + * An account federation policy specifies: * which IdP, or issuer, your Databricks account should + * accept tokens from * how to determine which Databricks user, or subject, a token is issued for + * + *

To configure a federation policy, you provide the following: * The required token __issuer__, + * as specified in the “iss” claim of your tokens. The issuer is an https URL that identifies your + * IdP. * The allowed token __audiences__, as specified in the “aud” claim of your tokens. This + * identifier is intended to represent the recipient of the token. As long as the audience in the + * token matches at least one audience in the policy, the token is considered a match. If + * unspecified, the default value is your Databricks account id. * The __subject claim__, which + * indicates which token claim contains the Databricks username of the user the token was issued + * for. If unspecified, the default value is “sub”. * Optionally, the public keys used to validate + * the signature of your tokens, in JWKS format. If unspecified (recommended), Databricks + * automatically fetches the public keys from your issuer’s well known endpoint. Databricks strongly + * recommends relying on your issuer’s well known endpoint for discovering public keys. + * + *

An example federation policy is: ``` issuer: "https://idp.mycompany.com/oidc" audiences: + * ["databricks"] subject_claim: "sub" ``` + * + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks as user `username@mycompany.com` is: ``` { "iss": "https://idp.mycompany.com/oidc", + * "aud": "databricks", "sub": "username@mycompany.com" } ``` + * + *
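A sketch that creates the example policy above through the new service and then pages through the account's policies, assuming a configured `AccountClient`; the `OidcFederationPolicy` setter names mirror the issuer/audiences/subject-claim fields described here and should be checked against the generated class:

```java
AccountClient a = new AccountClient();

FederationPolicy created =
    a.federationPolicy()
        .create(
            new CreateAccountFederationPolicyRequest()
                .setPolicy(
                    new FederationPolicy()
                        .setDescription("Tokens issued by our corporate IdP") // placeholder
                        .setOidcPolicy(
                            new OidcFederationPolicy()
                                .setIssuer("https://idp.mycompany.com/oidc")
                                .setAudiences(java.util.Collections.singletonList("databricks"))
                                .setSubjectClaim("sub"))));

// list(...) returns a Paginator that follows next_page_token transparently.
for (FederationPolicy p : a.federationPolicy().list(new ListAccountFederationPoliciesRequest())) {
  System.out.println(p.getUid() + " " + p.getName());
}
```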

You may also need to configure your IdP to generate tokens for your users to exchange with + * Databricks, if your users do not already have the ability to generate tokens that are compatible + * with your federation policy. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + * + *

[SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html + */ +@Generated +public class AccountFederationPolicyAPI { + private static final Logger LOG = LoggerFactory.getLogger(AccountFederationPolicyAPI.class); + + private final AccountFederationPolicyService impl; + + /** Regular-use constructor */ + public AccountFederationPolicyAPI(ApiClient apiClient) { + impl = new AccountFederationPolicyImpl(apiClient); + } + + /** Constructor for mocks */ + public AccountFederationPolicyAPI(AccountFederationPolicyService mock) { + impl = mock; + } + + /** Create account federation policy. */ + public FederationPolicy create(CreateAccountFederationPolicyRequest request) { + return impl.create(request); + } + + public void delete(String policyId) { + delete(new DeleteAccountFederationPolicyRequest().setPolicyId(policyId)); + } + + /** Delete account federation policy. */ + public void delete(DeleteAccountFederationPolicyRequest request) { + impl.delete(request); + } + + public FederationPolicy get(String policyId) { + return get(new GetAccountFederationPolicyRequest().setPolicyId(policyId)); + } + + /** Get account federation policy. */ + public FederationPolicy get(GetAccountFederationPolicyRequest request) { + return impl.get(request); + } + + /** List account federation policies. */ + public Iterable list(ListAccountFederationPoliciesRequest request) { + return new Paginator<>( + request, + impl::list, + ListFederationPoliciesResponse::getPolicies, + response -> { + String token = response.getNextPageToken(); + if (token == null || token.isEmpty()) { + return null; + } + return request.setPageToken(token); + }); + } + + public FederationPolicy update(String policyId, String updateMask) { + return update( + new UpdateAccountFederationPolicyRequest().setPolicyId(policyId).setUpdateMask(updateMask)); + } + + /** Update account federation policy. */ + public FederationPolicy update(UpdateAccountFederationPolicyRequest request) { + return impl.update(request); + } + + public AccountFederationPolicyService impl() { + return impl; + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyImpl.java new file mode 100755 index 000000000..1572ff64b --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyImpl.java @@ -0,0 +1,70 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import java.util.HashMap; +import java.util.Map; + +/** Package-local implementation of AccountFederationPolicy */ +@Generated +class AccountFederationPolicyImpl implements AccountFederationPolicyService { + private final ApiClient apiClient; + + public AccountFederationPolicyImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public FederationPolicy create(CreateAccountFederationPolicyRequest request) { + String path = + String.format("/api/2.0/accounts/%s/federationPolicies", apiClient.configuredAccountID()); + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Content-Type", "application/json"); + return apiClient.POST(path, request.getPolicy(), FederationPolicy.class, headers); + } + + @Override + public void delete(DeleteAccountFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/federationPolicies/%s", + apiClient.configuredAccountID(), request.getPolicyId()); + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + apiClient.DELETE(path, request, DeleteResponse.class, headers); + } + + @Override + public FederationPolicy get(GetAccountFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/federationPolicies/%s", + apiClient.configuredAccountID(), request.getPolicyId()); + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + return apiClient.GET(path, request, FederationPolicy.class, headers); + } + + @Override + public ListFederationPoliciesResponse list(ListAccountFederationPoliciesRequest request) { + String path = + String.format("/api/2.0/accounts/%s/federationPolicies", apiClient.configuredAccountID()); + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + return apiClient.GET(path, request, ListFederationPoliciesResponse.class, headers); + } + + @Override + public FederationPolicy update(UpdateAccountFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/federationPolicies/%s", + apiClient.configuredAccountID(), request.getPolicyId()); + Map headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Content-Type", "application/json"); + return apiClient.PATCH(path, request.getPolicy(), FederationPolicy.class, headers); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyService.java new file mode 100755 index 000000000..88460ac87 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/AccountFederationPolicyService.java @@ -0,0 +1,72 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; + +/** + * These APIs manage account federation policies. + * + *

Account federation policies allow users and service principals in your Databricks account to + * securely access Databricks APIs using tokens from your trusted identity providers (IdPs). + * + *

With token federation, your users and service principals can exchange tokens from your IdP for + * Databricks OAuth tokens, which can be used to access Databricks APIs. Token federation eliminates + * the need to manage Databricks secrets, and allows you to centralize management of token issuance + * policies in your IdP. Databricks token federation is typically used in combination with [SCIM], + * so users in your IdP are synchronized into your Databricks account. + * + *

Token federation is configured in your Databricks account using an account federation policy. + * An account federation policy specifies: * which IdP, or issuer, your Databricks account should + * accept tokens from * how to determine which Databricks user, or subject, a token is issued for + * + *

To configure a federation policy, you provide the following: * The required token __issuer__, + * as specified in the “iss” claim of your tokens. The issuer is an https URL that identifies your + * IdP. * The allowed token __audiences__, as specified in the “aud” claim of your tokens. This + * identifier is intended to represent the recipient of the token. As long as the audience in the + * token matches at least one audience in the policy, the token is considered a match. If + * unspecified, the default value is your Databricks account id. * The __subject claim__, which + * indicates which token claim contains the Databricks username of the user the token was issued + * for. If unspecified, the default value is “sub”. * Optionally, the public keys used to validate + * the signature of your tokens, in JWKS format. If unspecified (recommended), Databricks + * automatically fetches the public keys from your issuer’s well known endpoint. Databricks strongly + * recommends relying on your issuer’s well known endpoint for discovering public keys. + * + *

An example federation policy is: ``` issuer: "https://idp.mycompany.com/oidc" audiences: + * ["databricks"] subject_claim: "sub" ``` + * + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks as user `username@mycompany.com` is: ``` { "iss": "https://idp.mycompany.com/oidc", + * "aud": "databricks", "sub": "username@mycompany.com" } ``` + * + *

You may also need to configure your IdP to generate tokens for your users to exchange with + * Databricks, if your users do not already have the ability to generate tokens that are compatible + * with your federation policy. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + * + *

[SCIM]: https://docs.databricks.com/admin/users-groups/scim/index.html + * + *

This is the high-level interface that contains generated methods. + *

Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface AccountFederationPolicyService { + /** Create account federation policy. */ + FederationPolicy create( + CreateAccountFederationPolicyRequest createAccountFederationPolicyRequest); + + /** Delete account federation policy. */ + void delete(DeleteAccountFederationPolicyRequest deleteAccountFederationPolicyRequest); + + /** Get account federation policy. */ + FederationPolicy get(GetAccountFederationPolicyRequest getAccountFederationPolicyRequest); + + /** List account federation policies. */ + ListFederationPoliciesResponse list( + ListAccountFederationPoliciesRequest listAccountFederationPoliciesRequest); + + /** Update account federation policy. */ + FederationPolicy update( + UpdateAccountFederationPolicyRequest updateAccountFederationPolicyRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateAccountFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateAccountFederationPolicyRequest.java new file mode 100755 index 000000000..d7391eb14 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateAccountFederationPolicyRequest.java @@ -0,0 +1,65 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Create account federation policy */ +@Generated +public class CreateAccountFederationPolicyRequest { + /** */ + @JsonProperty("policy") + private FederationPolicy policy; + + /** + * The identifier for the federation policy. If unspecified, the id will be assigned by + * Databricks. 
+ */ + @JsonIgnore + @QueryParam("policy_id") + private String policyId; + + public CreateAccountFederationPolicyRequest setPolicy(FederationPolicy policy) { + this.policy = policy; + return this; + } + + public FederationPolicy getPolicy() { + return policy; + } + + public CreateAccountFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateAccountFederationPolicyRequest that = (CreateAccountFederationPolicyRequest) o; + return Objects.equals(policy, that.policy) && Objects.equals(policyId, that.policyId); + } + + @Override + public int hashCode() { + return Objects.hash(policy, policyId); + } + + @Override + public String toString() { + return new ToStringer(CreateAccountFederationPolicyRequest.class) + .add("policy", policy) + .add("policyId", policyId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalFederationPolicyRequest.java new file mode 100755 index 000000000..7d1e1b78f --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/CreateServicePrincipalFederationPolicyRequest.java @@ -0,0 +1,82 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Create service principal federation policy */ +@Generated +public class CreateServicePrincipalFederationPolicyRequest { + /** */ + @JsonProperty("policy") + private FederationPolicy policy; + + /** + * The identifier for the federation policy. If unspecified, the id will be assigned by + * Databricks. + */ + @JsonIgnore + @QueryParam("policy_id") + private String policyId; + + /** The service principal id for the federation policy. 
*/ + @JsonIgnore private Long servicePrincipalId; + + public CreateServicePrincipalFederationPolicyRequest setPolicy(FederationPolicy policy) { + this.policy = policy; + return this; + } + + public FederationPolicy getPolicy() { + return policy; + } + + public CreateServicePrincipalFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + public CreateServicePrincipalFederationPolicyRequest setServicePrincipalId( + Long servicePrincipalId) { + this.servicePrincipalId = servicePrincipalId; + return this; + } + + public Long getServicePrincipalId() { + return servicePrincipalId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateServicePrincipalFederationPolicyRequest that = + (CreateServicePrincipalFederationPolicyRequest) o; + return Objects.equals(policy, that.policy) + && Objects.equals(policyId, that.policyId) + && Objects.equals(servicePrincipalId, that.servicePrincipalId); + } + + @Override + public int hashCode() { + return Objects.hash(policy, policyId, servicePrincipalId); + } + + @Override + public String toString() { + return new ToStringer(CreateServicePrincipalFederationPolicyRequest.class) + .add("policy", policy) + .add("policyId", policyId) + .add("servicePrincipalId", servicePrincipalId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteAccountFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteAccountFederationPolicyRequest.java new file mode 100755 index 000000000..42e92132a --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteAccountFederationPolicyRequest.java @@ -0,0 +1,44 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete account federation policy */ +@Generated +public class DeleteAccountFederationPolicyRequest { + /** */ + @JsonIgnore private String policyId; + + public DeleteAccountFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteAccountFederationPolicyRequest that = (DeleteAccountFederationPolicyRequest) o; + return Objects.equals(policyId, that.policyId); + } + + @Override + public int hashCode() { + return Objects.hash(policyId); + } + + @Override + public String toString() { + return new ToStringer(DeleteAccountFederationPolicyRequest.class) + .add("policyId", policyId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalFederationPolicyRequest.java new file mode 100755 index 000000000..c72cba6de --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/DeleteServicePrincipalFederationPolicyRequest.java @@ -0,0 +1,60 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete service principal federation policy */ +@Generated +public class DeleteServicePrincipalFederationPolicyRequest { + /** */ + @JsonIgnore private String policyId; + + /** The service principal id for the federation policy. 
*/ + @JsonIgnore private Long servicePrincipalId; + + public DeleteServicePrincipalFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + public DeleteServicePrincipalFederationPolicyRequest setServicePrincipalId( + Long servicePrincipalId) { + this.servicePrincipalId = servicePrincipalId; + return this; + } + + public Long getServicePrincipalId() { + return servicePrincipalId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteServicePrincipalFederationPolicyRequest that = + (DeleteServicePrincipalFederationPolicyRequest) o; + return Objects.equals(policyId, that.policyId) + && Objects.equals(servicePrincipalId, that.servicePrincipalId); + } + + @Override + public int hashCode() { + return Objects.hash(policyId, servicePrincipalId); + } + + @Override + public String toString() { + return new ToStringer(DeleteServicePrincipalFederationPolicyRequest.class) + .add("policyId", policyId) + .add("servicePrincipalId", servicePrincipalId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/FederationPolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/FederationPolicy.java new file mode 100755 index 000000000..feb093234 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/FederationPolicy.java @@ -0,0 +1,122 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class FederationPolicy { + /** Creation time of the federation policy. */ + @JsonProperty("create_time") + private String createTime; + + /** Description of the federation policy. */ + @JsonProperty("description") + private String description; + + /** + * Name of the federation policy. The name must contain only lowercase alphanumeric characters, + * numbers, and hyphens. It must be unique within the account. + */ + @JsonProperty("name") + private String name; + + /** Specifies the policy to use for validating OIDC claims in your federated tokens. */ + @JsonProperty("oidc_policy") + private OidcFederationPolicy oidcPolicy; + + /** Unique, immutable id of the federation policy. */ + @JsonProperty("uid") + private String uid; + + /** Last update time of the federation policy. 
*/ + @JsonProperty("update_time") + private String updateTime; + + public FederationPolicy setCreateTime(String createTime) { + this.createTime = createTime; + return this; + } + + public String getCreateTime() { + return createTime; + } + + public FederationPolicy setDescription(String description) { + this.description = description; + return this; + } + + public String getDescription() { + return description; + } + + public FederationPolicy setName(String name) { + this.name = name; + return this; + } + + public String getName() { + return name; + } + + public FederationPolicy setOidcPolicy(OidcFederationPolicy oidcPolicy) { + this.oidcPolicy = oidcPolicy; + return this; + } + + public OidcFederationPolicy getOidcPolicy() { + return oidcPolicy; + } + + public FederationPolicy setUid(String uid) { + this.uid = uid; + return this; + } + + public String getUid() { + return uid; + } + + public FederationPolicy setUpdateTime(String updateTime) { + this.updateTime = updateTime; + return this; + } + + public String getUpdateTime() { + return updateTime; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FederationPolicy that = (FederationPolicy) o; + return Objects.equals(createTime, that.createTime) + && Objects.equals(description, that.description) + && Objects.equals(name, that.name) + && Objects.equals(oidcPolicy, that.oidcPolicy) + && Objects.equals(uid, that.uid) + && Objects.equals(updateTime, that.updateTime); + } + + @Override + public int hashCode() { + return Objects.hash(createTime, description, name, oidcPolicy, uid, updateTime); + } + + @Override + public String toString() { + return new ToStringer(FederationPolicy.class) + .add("createTime", createTime) + .add("description", description) + .add("name", name) + .add("oidcPolicy", oidcPolicy) + .add("uid", uid) + .add("updateTime", updateTime) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetAccountFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetAccountFederationPolicyRequest.java new file mode 100755 index 000000000..dfe03d950 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetAccountFederationPolicyRequest.java @@ -0,0 +1,44 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get account federation policy */ +@Generated +public class GetAccountFederationPolicyRequest { + /** */ + @JsonIgnore private String policyId; + + public GetAccountFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetAccountFederationPolicyRequest that = (GetAccountFederationPolicyRequest) o; + return Objects.equals(policyId, that.policyId); + } + + @Override + public int hashCode() { + return Objects.hash(policyId); + } + + @Override + public String toString() { + return new ToStringer(GetAccountFederationPolicyRequest.class) + .add("policyId", policyId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetServicePrincipalFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetServicePrincipalFederationPolicyRequest.java new file mode 100755 index 000000000..0738ebdc3 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/GetServicePrincipalFederationPolicyRequest.java @@ -0,0 +1,59 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get service principal federation policy */ +@Generated +public class GetServicePrincipalFederationPolicyRequest { + /** */ + @JsonIgnore private String policyId; + + /** The service principal id for the federation policy. 
*/ + @JsonIgnore private Long servicePrincipalId; + + public GetServicePrincipalFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + public GetServicePrincipalFederationPolicyRequest setServicePrincipalId(Long servicePrincipalId) { + this.servicePrincipalId = servicePrincipalId; + return this; + } + + public Long getServicePrincipalId() { + return servicePrincipalId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetServicePrincipalFederationPolicyRequest that = + (GetServicePrincipalFederationPolicyRequest) o; + return Objects.equals(policyId, that.policyId) + && Objects.equals(servicePrincipalId, that.servicePrincipalId); + } + + @Override + public int hashCode() { + return Objects.hash(policyId, servicePrincipalId); + } + + @Override + public String toString() { + return new ToStringer(GetServicePrincipalFederationPolicyRequest.class) + .add("policyId", policyId) + .add("servicePrincipalId", servicePrincipalId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListAccountFederationPoliciesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListAccountFederationPoliciesRequest.java new file mode 100755 index 000000000..cda3d1b8a --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListAccountFederationPoliciesRequest.java @@ -0,0 +1,62 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** List account federation policies */ +@Generated +public class ListAccountFederationPoliciesRequest { + /** */ + @JsonIgnore + @QueryParam("page_size") + private Long pageSize; + + /** */ + @JsonIgnore + @QueryParam("page_token") + private String pageToken; + + public ListAccountFederationPoliciesRequest setPageSize(Long pageSize) { + this.pageSize = pageSize; + return this; + } + + public Long getPageSize() { + return pageSize; + } + + public ListAccountFederationPoliciesRequest setPageToken(String pageToken) { + this.pageToken = pageToken; + return this; + } + + public String getPageToken() { + return pageToken; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListAccountFederationPoliciesRequest that = (ListAccountFederationPoliciesRequest) o; + return Objects.equals(pageSize, that.pageSize) && Objects.equals(pageToken, that.pageToken); + } + + @Override + public int hashCode() { + return Objects.hash(pageSize, pageToken); + } + + @Override + public String toString() { + return new ToStringer(ListAccountFederationPoliciesRequest.class) + .add("pageSize", pageSize) + .add("pageToken", pageToken) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListFederationPoliciesResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListFederationPoliciesResponse.java new file mode 100755 index 000000000..a4c77a09b --- /dev/null +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListFederationPoliciesResponse.java @@ -0,0 +1,60 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Collection; +import java.util.Objects; + +@Generated +public class ListFederationPoliciesResponse { + /** */ + @JsonProperty("next_page_token") + private String nextPageToken; + + /** */ + @JsonProperty("policies") + private Collection<FederationPolicy> policies; + + public ListFederationPoliciesResponse setNextPageToken(String nextPageToken) { + this.nextPageToken = nextPageToken; + return this; + } + + public String getNextPageToken() { + return nextPageToken; + } + + public ListFederationPoliciesResponse setPolicies(Collection<FederationPolicy> policies) { + this.policies = policies; + return this; + } + + public Collection<FederationPolicy> getPolicies() { + return policies; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListFederationPoliciesResponse that = (ListFederationPoliciesResponse) o; + return Objects.equals(nextPageToken, that.nextPageToken) + && Objects.equals(policies, that.policies); + } + + @Override + public int hashCode() { + return Objects.hash(nextPageToken, policies); + } + + @Override + public String toString() { + return new ToStringer(ListFederationPoliciesResponse.class) + .add("nextPageToken", nextPageToken) + .add("policies", policies) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalFederationPoliciesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalFederationPoliciesRequest.java new file mode 100755 index 000000000..52ebe4dbb --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ListServicePrincipalFederationPoliciesRequest.java @@ -0,0 +1,79 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** List service principal federation policies */ +@Generated +public class ListServicePrincipalFederationPoliciesRequest { + /** */ + @JsonIgnore + @QueryParam("page_size") + private Long pageSize; + + /** */ + @JsonIgnore + @QueryParam("page_token") + private String pageToken; + + /** The service principal id for the federation policy.
*/ + @JsonIgnore private Long servicePrincipalId; + + public ListServicePrincipalFederationPoliciesRequest setPageSize(Long pageSize) { + this.pageSize = pageSize; + return this; + } + + public Long getPageSize() { + return pageSize; + } + + public ListServicePrincipalFederationPoliciesRequest setPageToken(String pageToken) { + this.pageToken = pageToken; + return this; + } + + public String getPageToken() { + return pageToken; + } + + public ListServicePrincipalFederationPoliciesRequest setServicePrincipalId( + Long servicePrincipalId) { + this.servicePrincipalId = servicePrincipalId; + return this; + } + + public Long getServicePrincipalId() { + return servicePrincipalId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListServicePrincipalFederationPoliciesRequest that = + (ListServicePrincipalFederationPoliciesRequest) o; + return Objects.equals(pageSize, that.pageSize) + && Objects.equals(pageToken, that.pageToken) + && Objects.equals(servicePrincipalId, that.servicePrincipalId); + } + + @Override + public int hashCode() { + return Objects.hash(pageSize, pageToken, servicePrincipalId); + } + + @Override + public String toString() { + return new ToStringer(ListServicePrincipalFederationPoliciesRequest.class) + .add("pageSize", pageSize) + .add("pageToken", pageToken) + .add("servicePrincipalId", servicePrincipalId) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OidcFederationPolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OidcFederationPolicy.java new file mode 100755 index 000000000..ffe72ba17 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/OidcFederationPolicy.java @@ -0,0 +1,123 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Collection; +import java.util.Objects; + +/** Specifies the policy to use for validating OIDC claims in your federated tokens. */ +@Generated +public class OidcFederationPolicy { + /** + * The allowed token audiences, as specified in the 'aud' claim of federated tokens. The audience + * identifier is intended to represent the recipient of the token. Can be any non-empty string + * value. As long as the audience in the token matches at least one audience in the policy, the + * token is considered a match. If audiences is unspecified, defaults to your Databricks account + * id. + */ + @JsonProperty("audiences") + private Collection<String> audiences; + + /** The required token issuer, as specified in the 'iss' claim of federated tokens. */ + @JsonProperty("issuer") + private String issuer; + + /** + * The public keys used to validate the signature of federated tokens, in JWKS format. If + * unspecified (recommended), Databricks automatically fetches the public keys from your issuer’s + * well known endpoint. Databricks strongly recommends relying on your issuer’s well known + * endpoint for discovering public keys. + */ + @JsonProperty("jwks_json") + private String jwksJson; + + /** + * The required token subject, as specified in the subject claim of federated tokens. Must be + * specified for service principal federation policies. Must not be specified for account + * federation policies.
+ */ + @JsonProperty("subject") + private String subject; + + /** + * The claim that contains the subject of the token. If unspecified, the default value is 'sub'. + */ + @JsonProperty("subject_claim") + private String subjectClaim; + + public OidcFederationPolicy setAudiences(Collection audiences) { + this.audiences = audiences; + return this; + } + + public Collection getAudiences() { + return audiences; + } + + public OidcFederationPolicy setIssuer(String issuer) { + this.issuer = issuer; + return this; + } + + public String getIssuer() { + return issuer; + } + + public OidcFederationPolicy setJwksJson(String jwksJson) { + this.jwksJson = jwksJson; + return this; + } + + public String getJwksJson() { + return jwksJson; + } + + public OidcFederationPolicy setSubject(String subject) { + this.subject = subject; + return this; + } + + public String getSubject() { + return subject; + } + + public OidcFederationPolicy setSubjectClaim(String subjectClaim) { + this.subjectClaim = subjectClaim; + return this; + } + + public String getSubjectClaim() { + return subjectClaim; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OidcFederationPolicy that = (OidcFederationPolicy) o; + return Objects.equals(audiences, that.audiences) + && Objects.equals(issuer, that.issuer) + && Objects.equals(jwksJson, that.jwksJson) + && Objects.equals(subject, that.subject) + && Objects.equals(subjectClaim, that.subjectClaim); + } + + @Override + public int hashCode() { + return Objects.hash(audiences, issuer, jwksJson, subject, subjectClaim); + } + + @Override + public String toString() { + return new ToStringer(OidcFederationPolicy.class) + .add("audiences", audiences) + .add("issuer", issuer) + .add("jwksJson", jwksJson) + .add("subject", subject) + .add("subjectClaim", subjectClaim) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyAPI.java new file mode 100755 index 000000000..470ad815f --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyAPI.java @@ -0,0 +1,144 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.Paginator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * These APIs manage service principal federation policies. + * + *

Service principal federation, also known as Workload Identity Federation, allows your + * automated workloads running outside of Databricks to securely access Databricks APIs without the + * need for Databricks secrets. With Workload Identity Federation, your application (or workload) + * authenticates to Databricks as a Databricks service principal, using tokens provided by the + * workload runtime. + * + *

Databricks strongly recommends using Workload Identity Federation to authenticate to + * Databricks from automated workloads, over alternatives such as OAuth client secrets or Personal + * Access Tokens, whenever possible. Workload Identity Federation is supported by many popular + * services, including GitHub Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes + * clusters, among others. + + *

Workload identity federation is configured in your Databricks account using a service + * principal federation policy. A service principal federation policy specifies: * which IdP, or + * issuer, the service principal is allowed to authenticate from * which workload identity, or + * subject, is allowed to authenticate as the Databricks service principal + * + *

To configure a federation policy, you provide the following: * The required token __issuer__, + * as specified in the “iss” claim of workload identity tokens. The issuer is an https URL that + * identifies the workload identity provider. * The required token __subject__, as specified in the + * “sub” claim of workload identity tokens. The subject uniquely identifies the workload in the + * workload runtime environment. * The allowed token __audiences__, as specified in the “aud” claim + * of workload identity tokens. The audience is intended to represent the recipient of the token. As + * long as the audience in the token matches at least one audience in the policy, the token is + * considered a match. If unspecified, the default value is your Databricks account id. * + * Optionally, the public keys used to validate the signature of the workload identity tokens, in + * JWKS format. If unspecified (recommended), Databricks automatically fetches the public keys from + * the issuer’s well known endpoint. Databricks strongly recommends relying on the issuer’s well + * known endpoint for discovering public keys. + * + *

An example service principal federation policy, for a GitHub Actions workload, is: ``` issuer: + * "https://token.actions.githubusercontent.com" audiences: ["https://github.com/my-github-org"] + * subject: "repo:my-github-org/my-repo:environment:prod" ``` + + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks is: ``` { "iss": "https://token.actions.githubusercontent.com", "aud": + * "https://github.com/my-github-org", "sub": "repo:my-github-org/my-repo:environment:prod" } ``` + * + *

You may also need to configure the workload runtime to generate tokens for your workloads. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + */ +@Generated +public class ServicePrincipalFederationPolicyAPI { + private static final Logger LOG = + LoggerFactory.getLogger(ServicePrincipalFederationPolicyAPI.class); + + private final ServicePrincipalFederationPolicyService impl; + + /** Regular-use constructor */ + public ServicePrincipalFederationPolicyAPI(ApiClient apiClient) { + impl = new ServicePrincipalFederationPolicyImpl(apiClient); + } + + /** Constructor for mocks */ + public ServicePrincipalFederationPolicyAPI(ServicePrincipalFederationPolicyService mock) { + impl = mock; + } + + public FederationPolicy create(long servicePrincipalId) { + return create( + new CreateServicePrincipalFederationPolicyRequest() + .setServicePrincipalId(servicePrincipalId)); + } + + /** Create service principal federation policy. */ + public FederationPolicy create(CreateServicePrincipalFederationPolicyRequest request) { + return impl.create(request); + } + + public void delete(long servicePrincipalId, String policyId) { + delete( + new DeleteServicePrincipalFederationPolicyRequest() + .setServicePrincipalId(servicePrincipalId) + .setPolicyId(policyId)); + } + + /** Delete service principal federation policy. */ + public void delete(DeleteServicePrincipalFederationPolicyRequest request) { + impl.delete(request); + } + + public FederationPolicy get(long servicePrincipalId, String policyId) { + return get( + new GetServicePrincipalFederationPolicyRequest() + .setServicePrincipalId(servicePrincipalId) + .setPolicyId(policyId)); + } + + /** Get service principal federation policy. */ + public FederationPolicy get(GetServicePrincipalFederationPolicyRequest request) { + return impl.get(request); + } + + public Iterable<FederationPolicy> list(long servicePrincipalId) { + return list( + new ListServicePrincipalFederationPoliciesRequest() + .setServicePrincipalId(servicePrincipalId)); + } + + /** List service principal federation policies. */ + public Iterable<FederationPolicy> list(ListServicePrincipalFederationPoliciesRequest request) { + return new Paginator<>( + request, + impl::list, + ListFederationPoliciesResponse::getPolicies, + response -> { + String token = response.getNextPageToken(); + if (token == null || token.isEmpty()) { + return null; + } + return request.setPageToken(token); + }); + } + + public FederationPolicy update(long servicePrincipalId, String policyId, String updateMask) { + return update( + new UpdateServicePrincipalFederationPolicyRequest() + .setServicePrincipalId(servicePrincipalId) + .setPolicyId(policyId) + .setUpdateMask(updateMask)); + } + + /** Update service principal federation policy. */ + public FederationPolicy update(UpdateServicePrincipalFederationPolicyRequest request) { + return impl.update(request); + } + + public ServicePrincipalFederationPolicyService impl() { + return impl; + } +}
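A minimal usage sketch for the API class above. Illustrative only: the service principal and policy ids are placeholders, and `setPolicy(...)` on the create request is assumed from the `request.getPolicy()` call visible in the Impl below.

```
// Illustrative sketch — not part of the generated sources.
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.oauth2.CreateServicePrincipalFederationPolicyRequest;
import com.databricks.sdk.service.oauth2.FederationPolicy;
import com.databricks.sdk.service.oauth2.OidcFederationPolicy;
import com.databricks.sdk.service.oauth2.UpdateServicePrincipalFederationPolicyRequest;
import java.util.Arrays;

public class ServicePrincipalFederationPolicyExample {
  public static void main(String[] args) {
    AccountClient account = new AccountClient(); // account-level config from the environment
    long spId = 1234567890L; // placeholder service principal id

    // Mirrors the GitHub Actions policy example from the Javadoc above.
    FederationPolicy created =
        account
            .servicePrincipalFederationPolicy()
            .create(
                new CreateServicePrincipalFederationPolicyRequest()
                    .setServicePrincipalId(spId)
                    .setPolicy( // assumed setter for the request's 'policy' field
                        new FederationPolicy()
                            .setOidcPolicy(
                                new OidcFederationPolicy()
                                    .setIssuer("https://token.actions.githubusercontent.com")
                                    .setAudiences(Arrays.asList("https://github.com/my-github-org"))
                                    .setSubject("repo:my-github-org/my-repo:environment:prod"))));
    System.out.println("created policy uid: " + created.getUid());

    // list(...) is Paginator-backed, so next_page_token is followed transparently.
    for (FederationPolicy p : account.servicePrincipalFederationPolicy().list(spId)) {
      System.out.println(p.getUid() + ": " + p.getName());
    }

    // PATCH semantics: only the comma-separated fields named in update_mask change.
    account
        .servicePrincipalFederationPolicy()
        .update(
            new UpdateServicePrincipalFederationPolicyRequest()
                .setServicePrincipalId(spId)
                .setPolicyId("my-policy-id") // placeholder policy id
                .setUpdateMask("description")
                .setPolicy(new FederationPolicy().setDescription("Prod deploys only")));
  }
}
```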
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyImpl.java new file mode 100755 index 000000000..4e3570397 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyImpl.java @@ -0,0 +1,81 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import java.util.HashMap; +import java.util.Map; + +/** Package-local implementation of ServicePrincipalFederationPolicy */ +@Generated +class ServicePrincipalFederationPolicyImpl implements ServicePrincipalFederationPolicyService { + private final ApiClient apiClient; + + public ServicePrincipalFederationPolicyImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public FederationPolicy create(CreateServicePrincipalFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/servicePrincipals/%s/federationPolicies", + apiClient.configuredAccountID(), request.getServicePrincipalId()); + Map<String, String> headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Content-Type", "application/json"); + return apiClient.POST(path, request.getPolicy(), FederationPolicy.class, headers); + } + + @Override + public void delete(DeleteServicePrincipalFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/servicePrincipals/%s/federationPolicies/%s", + apiClient.configuredAccountID(), + request.getServicePrincipalId(), + request.getPolicyId()); + Map<String, String> headers = new HashMap<>(); + headers.put("Accept", "application/json"); + apiClient.DELETE(path, request, DeleteResponse.class, headers); + } + + @Override + public FederationPolicy get(GetServicePrincipalFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/servicePrincipals/%s/federationPolicies/%s", + apiClient.configuredAccountID(), + request.getServicePrincipalId(), + request.getPolicyId()); + Map<String, String> headers = new HashMap<>(); + headers.put("Accept", "application/json"); + return apiClient.GET(path, request, FederationPolicy.class, headers); + } + + @Override + public ListFederationPoliciesResponse list( + ListServicePrincipalFederationPoliciesRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/servicePrincipals/%s/federationPolicies", + apiClient.configuredAccountID(), request.getServicePrincipalId()); + Map<String, String> headers = new HashMap<>(); + headers.put("Accept", "application/json"); + return apiClient.GET(path, request, ListFederationPoliciesResponse.class, headers); + } + + @Override + public FederationPolicy update(UpdateServicePrincipalFederationPolicyRequest request) { + String path = + String.format( + "/api/2.0/accounts/%s/servicePrincipals/%s/federationPolicies/%s", + apiClient.configuredAccountID(), + request.getServicePrincipalId(), + request.getPolicyId()); + Map<String, String> headers = new HashMap<>(); + headers.put("Accept", "application/json"); + headers.put("Content-Type", "application/json"); + return apiClient.PATCH(path, request.getPolicy(), FederationPolicy.class, headers); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyService.java new file mode 100755 index 000000000..530597c76 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/ServicePrincipalFederationPolicyService.java @@ -0,0 +1,76 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; + +/** + * These APIs manage service principal federation policies. + * + *

Service principal federation, also known as Workload Identity Federation, allows your + * automated workloads running outside of Databricks to securely access Databricks APIs without the + * need for Databricks secrets. With Workload Identity Federation, your application (or workload) + * authenticates to Databricks as a Databricks service principal, using tokens provided by the + * workload runtime. + * + *

Databricks strongly recommends using Workload Identity Federation to authenticate to + * Databricks from automated workloads, over alternatives such as OAuth client secrets or Personal + * Access Tokens, whenever possible. Workload Identity Federation is supported by many popular + * services, including GitHub Actions, Azure DevOps, GitLab, Terraform Cloud, and Kubernetes + * clusters, among others. + + *

Workload identity federation is configured in your Databricks account using a service + * principal federation policy. A service principal federation policy specifies: * which IdP, or + * issuer, the service principal is allowed to authenticate from * which workload identity, or + * subject, is allowed to authenticate as the Databricks service principal + * + *

To configure a federation policy, you provide the following: * The required token __issuer__, + * as specified in the “iss” claim of workload identity tokens. The issuer is an https URL that + * identifies the workload identity provider. * The required token __subject__, as specified in the + * “sub” claim of workload identity tokens. The subject uniquely identifies the workload in the + * workload runtime environment. * The allowed token __audiences__, as specified in the “aud” claim + * of workload identity tokens. The audience is intended to represent the recipient of the token. As + * long as the audience in the token matches at least one audience in the policy, the token is + * considered a match. If unspecified, the default value is your Databricks account id. * + * Optionally, the public keys used to validate the signature of the workload identity tokens, in + * JWKS format. If unspecified (recommended), Databricks automatically fetches the public keys from + * the issuer’s well known endpoint. Databricks strongly recommends relying on the issuer’s well + * known endpoint for discovering public keys. + * + *

An example service principal federation policy, for a GitHub Actions workload, is: ``` issuer: + * "https://token.actions.githubusercontent.com" audiences: ["https://github.com/my-github-org"] + * subject: "repo:my-github-org/my-repo:environment:prod" ``` + + *

An example JWT token body that matches this policy and could be used to authenticate to + * Databricks is: ``` { "iss": "https://token.actions.githubusercontent.com", "aud": + * "https://github.com/my-github-org", "sub": "repo:my-github-org/my-repo:environment:prod" } ``` + * + *

You may also need to configure the workload runtime to generate tokens for your workloads. + * + *

You do not need to configure an OAuth application in Databricks to use token federation. + * + *

This is the high-level interface that contains generated methods. + + *

Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface ServicePrincipalFederationPolicyService { + /** Create service principal federation policy. */ + FederationPolicy create( + CreateServicePrincipalFederationPolicyRequest createServicePrincipalFederationPolicyRequest); + + /** Delete service principal federation policy. */ + void delete( + DeleteServicePrincipalFederationPolicyRequest deleteServicePrincipalFederationPolicyRequest); + + /** Get service principal federation policy. */ + FederationPolicy get( + GetServicePrincipalFederationPolicyRequest getServicePrincipalFederationPolicyRequest); + + /** List service principal federation policies. */ + ListFederationPoliciesResponse list( + ListServicePrincipalFederationPoliciesRequest listServicePrincipalFederationPoliciesRequest); + + /** Update service principal federation policy. */ + FederationPolicy update( + UpdateServicePrincipalFederationPolicyRequest updateServicePrincipalFederationPolicyRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateAccountFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateAccountFederationPolicyRequest.java new file mode 100755 index 000000000..9acea6094 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateAccountFederationPolicyRequest.java @@ -0,0 +1,81 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Update account federation policy */ +@Generated +public class UpdateAccountFederationPolicyRequest { + /** */ + @JsonProperty("policy") + private FederationPolicy policy; + + /** */ + @JsonIgnore private String policyId; + + /** + * Field mask is required to be passed into the PATCH request. Field mask specifies which fields + * of the setting payload will be updated. The field mask needs to be supplied as a single string. + * To specify multiple fields in the field mask, use a comma as the separator (no space).
+ */ + @JsonIgnore + @QueryParam("update_mask") + private String updateMask; + + public UpdateAccountFederationPolicyRequest setPolicy(FederationPolicy policy) { + this.policy = policy; + return this; + } + + public FederationPolicy getPolicy() { + return policy; + } + + public UpdateAccountFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + public UpdateAccountFederationPolicyRequest setUpdateMask(String updateMask) { + this.updateMask = updateMask; + return this; + } + + public String getUpdateMask() { + return updateMask; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateAccountFederationPolicyRequest that = (UpdateAccountFederationPolicyRequest) o; + return Objects.equals(policy, that.policy) + && Objects.equals(policyId, that.policyId) + && Objects.equals(updateMask, that.updateMask); + } + + @Override + public int hashCode() { + return Objects.hash(policy, policyId, updateMask); + } + + @Override + public String toString() { + return new ToStringer(UpdateAccountFederationPolicyRequest.class) + .add("policy", policy) + .add("policyId", policyId) + .add("updateMask", updateMask) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateServicePrincipalFederationPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateServicePrincipalFederationPolicyRequest.java new file mode 100755 index 000000000..8d95f0392 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/oauth2/UpdateServicePrincipalFederationPolicyRequest.java @@ -0,0 +1,97 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.oauth2; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Update service principal federation policy */ +@Generated +public class UpdateServicePrincipalFederationPolicyRequest { + /** */ + @JsonProperty("policy") + private FederationPolicy policy; + + /** */ + @JsonIgnore private String policyId; + + /** The service principal id for the federation policy. */ + @JsonIgnore private Long servicePrincipalId; + + /** + * Field mask is required to be passed into the PATCH request. Field mask specifies which fields + * of the setting payload will be updated. The field mask needs to be supplied as single string. + * To specify multiple fields in the field mask, use comma as the separator (no space). 
+ */ + @JsonIgnore + @QueryParam("update_mask") + private String updateMask; + + public UpdateServicePrincipalFederationPolicyRequest setPolicy(FederationPolicy policy) { + this.policy = policy; + return this; + } + + public FederationPolicy getPolicy() { + return policy; + } + + public UpdateServicePrincipalFederationPolicyRequest setPolicyId(String policyId) { + this.policyId = policyId; + return this; + } + + public String getPolicyId() { + return policyId; + } + + public UpdateServicePrincipalFederationPolicyRequest setServicePrincipalId( + Long servicePrincipalId) { + this.servicePrincipalId = servicePrincipalId; + return this; + } + + public Long getServicePrincipalId() { + return servicePrincipalId; + } + + public UpdateServicePrincipalFederationPolicyRequest setUpdateMask(String updateMask) { + this.updateMask = updateMask; + return this; + } + + public String getUpdateMask() { + return updateMask; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateServicePrincipalFederationPolicyRequest that = + (UpdateServicePrincipalFederationPolicyRequest) o; + return Objects.equals(policy, that.policy) + && Objects.equals(policyId, that.policyId) + && Objects.equals(servicePrincipalId, that.servicePrincipalId) + && Objects.equals(updateMask, that.updateMask); + } + + @Override + public int hashCode() { + return Objects.hash(policy, policyId, servicePrincipalId, updateMask); + } + + @Override + public String toString() { + return new ToStringer(UpdateServicePrincipalFederationPolicyRequest.class) + .add("policy", policy) + .add("policyId", policyId) + .add("servicePrincipalId", servicePrincipalId) + .add("updateMask", updateMask) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/RestartWindow.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/RestartWindow.java index 3156277a6..6576bd13f 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/RestartWindow.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/RestartWindow.java @@ -5,6 +5,7 @@ import com.databricks.sdk.support.Generated; import com.databricks.sdk.support.ToStringer; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Collection; import java.util.Objects; @Generated @@ -14,7 +15,7 @@ public class RestartWindow { * start_hour). If not specified all days of the week will be used. */ @JsonProperty("days_of_week") - private RestartWindowDaysOfWeek daysOfWeek; + private Collection daysOfWeek; /** * An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day. 
@@ -31,12 +32,12 @@ public class RestartWindow { @JsonProperty("time_zone_id") private String timeZoneId; - public RestartWindow setDaysOfWeek(RestartWindowDaysOfWeek daysOfWeek) { + public RestartWindow setDaysOfWeek(Collection<RestartWindowDaysOfWeek> daysOfWeek) { this.daysOfWeek = daysOfWeek; return this; } - public RestartWindowDaysOfWeek getDaysOfWeek() { + public Collection<RestartWindowDaysOfWeek> getDaysOfWeek() { return daysOfWeek; } diff --git a/examples/docs/pom.xml b/examples/docs/pom.xml index 416720717..a9910fa2f 100644 --- a/examples/docs/pom.xml +++ b/examples/docs/pom.xml @@ -24,7 +24,7 @@ <groupId>com.databricks</groupId> <artifactId>databricks-sdk-java</artifactId> - <version>0.37.0</version> + <version>0.38.0</version> diff --git a/examples/spring-boot-oauth-u2m-demo/pom.xml b/examples/spring-boot-oauth-u2m-demo/pom.xml index 921257641..a739a3b4c 100644 --- a/examples/spring-boot-oauth-u2m-demo/pom.xml +++ b/examples/spring-boot-oauth-u2m-demo/pom.xml @@ -37,7 +37,7 @@ <groupId>com.databricks</groupId> <artifactId>databricks-sdk-java</artifactId> - <version>0.37.0</version> + <version>0.38.0</version> <groupId>com.fasterxml.jackson.datatype</groupId> diff --git a/pom.xml b/pom.xml index a3c253213..0719dc332 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ <modelVersion>4.0.0</modelVersion> <groupId>com.databricks</groupId> <artifactId>databricks-sdk-parent</artifactId> - <version>0.37.0</version> + <version>0.38.0</version> <packaging>pom</packaging> <name>Databricks SDK for Java</name> <description>The Databricks SDK for Java includes functionality to accelerate development with Java for diff --git a/shaded/pom.xml b/shaded/pom.xml index ca25cbcfd..a5bfc9c7b 100644 --- a/shaded/pom.xml +++ b/shaded/pom.xml @@ -4,7 +4,7 @@ <modelVersion>4.0.0</modelVersion> - <version>0.37.0</version> + <version>0.38.0</version> com.databricks
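One migration note on the `RestartWindow` change above: `daysOfWeek` moved from a single enum value to a collection. A minimal before/after sketch follows; the enum constants and the `setStartHour`/`setTimeZoneId` setters are assumed to follow the generated pattern shown elsewhere in this patch.

```
// Illustrative sketch — not part of the generated sources.
import com.databricks.sdk.service.pipelines.RestartWindow;
import com.databricks.sdk.service.pipelines.RestartWindowDaysOfWeek;
import java.util.Arrays;

public class RestartWindowMigration {
  public static void main(String[] args) {
    // Before 0.38.0: window.setDaysOfWeek(RestartWindowDaysOfWeek.SATURDAY) took one value.
    // From 0.38.0 the setter takes a Collection, so several days can be listed:
    RestartWindow window =
        new RestartWindow()
            .setDaysOfWeek(
                Arrays.asList(RestartWindowDaysOfWeek.SATURDAY, RestartWindowDaysOfWeek.SUNDAY))
            .setStartHour(2L) // assumed generated setter for start_hour
            .setTimeZoneId("UTC"); // assumed generated setter for time_zone_id
    System.out.println(window);
  }
}
```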