From 2613a5119572cdb3b88084b4274995a1e6d26db1 Mon Sep 17 00:00:00 2001 From: Serge Smertin <259697+nfx@users.noreply.github.com> Date: Wed, 22 Dec 2021 18:04:01 +0100 Subject: [PATCH] Reformat code examples (#999) Run of `terrafmt fmt -p '*.md' .` --- .github/ISSUE_TEMPLATE/provider-issue.md | 2 +- Makefile | 4 + README.md | 4 +- docs/data-sources/aws_assume_role_policy.md | 2 +- docs/data-sources/aws_bucket_policy.md | 8 +- docs/data-sources/clusters.md | 6 +- docs/data-sources/dbfs_file.md | 6 +- docs/data-sources/dbfs_file_paths.md | 6 +- docs/data-sources/group.md | 6 +- docs/data-sources/node_type.md | 26 +- docs/data-sources/notebook.md | 6 +- docs/data-sources/notebook_paths.md | 4 +- docs/data-sources/spark_version.md | 26 +- docs/data-sources/user.md | 6 +- docs/guides/aws-e2-firewall-hub-and-spoke.md | 52 +-- docs/guides/aws-e2-firewall-workspace.md | 14 +- docs/guides/aws-workspace.md | 24 +- docs/guides/azure-workspace.md | 8 +- docs/guides/migration-0.3.x.md | 2 +- docs/guides/migration-0.4.x.md | 2 +- docs/guides/passthrough-cluster-per-user.md | 14 +- docs/guides/workspace-management.md | 10 +- docs/index.md | 34 +- docs/resources/cluster.md | 18 +- docs/resources/cluster_policy.md | 84 ++-- docs/resources/dbfs_file.md | 8 +- docs/resources/global_init_script.md | 2 +- docs/resources/group.md | 14 +- docs/resources/group_instance_profile.md | 8 +- docs/resources/group_member.md | 14 +- docs/resources/instance_pool.md | 6 +- docs/resources/instance_profile.md | 16 +- docs/resources/ip_access_list.md | 6 +- docs/resources/library.md | 16 +- docs/resources/mlflow_experiment.md | 4 +- docs/resources/mlflow_model.md | 2 +- docs/resources/mount.md | 136 +++--- docs/resources/mws_customer_managed_keys.md | 24 +- docs/resources/mws_log_delivery.md | 70 ++-- docs/resources/mws_networks.md | 8 +- docs/resources/mws_workspaces.md | 24 +- docs/resources/notebook.md | 6 +- docs/resources/obo_token.md | 20 +- docs/resources/permissions.md | 414 +++++++++---------- docs/resources/secret.md | 10 +- docs/resources/secret_acl.md | 18 +- docs/resources/secret_scope.md | 2 +- docs/resources/service_principal.md | 8 +- docs/resources/sql_endpoint.md | 8 +- docs/resources/sql_global_config.md | 4 +- docs/resources/sql_permissions.md | 24 +- docs/resources/sql_query.md | 8 +- docs/resources/sql_visualization.md | 6 +- docs/resources/token.md | 8 +- docs/resources/user.md | 10 +- docs/resources/user_instance_profile.md | 8 +- docs/resources/workspace_conf.md | 6 +- scripts/README.md | 2 +- scripts/azvnet-integration/README.md | 2 +- scripts/gcp-integration/README.md | 2 +- 60 files changed, 651 insertions(+), 647 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/provider-issue.md b/.github/ISSUE_TEMPLATE/provider-issue.md index e796b6084a..3861e8d646 100644 --- a/.github/ISSUE_TEMPLATE/provider-issue.md +++ b/.github/ISSUE_TEMPLATE/provider-issue.md @@ -39,4 +39,4 @@ TF_LOG=DEBUG terraform plan 2>&1 | grep databricks | sed -E 's/^.* plugin[^:]+: If Terraform produced a panic, please provide a link to a GitHub Gist containing the output of the `crash.log`. ### Important Factoids -Are there anything atypical about your accounts that we should know? \ No newline at end of file +Are there anything atypical about your accounts that we should know? diff --git a/Makefile b/Makefile index acbc12f8a2..ae0ab54baa 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,10 @@ fmt: @echo "✓ Formatting source code with gofmt ..." @gofmt -w $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") +fmt-docs: + @echo "✓ Formatting code samples in documentation" + @terrafmt fmt -p '*.md' . + lint: vendor @echo "✓ Linting source code with https://staticcheck.io/ ..." @staticcheck ./... diff --git a/README.md b/README.md index cf69f2517c..4e8978faf0 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ If you use Terraform 0.13 or newer, please refer to instructions specified at [r terraform { required_providers { databricks = { - source = "databrickslabs/databricks" + source = "databrickslabs/databricks" version = "0.4.1" } } @@ -80,7 +80,7 @@ Then create a small sample file, named `main.tf` with approximately following co ```terraform provider "databricks" { - host = "https://abc-defg-024.cloud.databricks.com/" + host = "https://abc-defg-024.cloud.databricks.com/" token = "" } diff --git a/docs/data-sources/aws_assume_role_policy.md b/docs/data-sources/aws_assume_role_policy.md index b23cedf1fd..b72ee6b49d 100644 --- a/docs/data-sources/aws_assume_role_policy.md +++ b/docs/data-sources/aws_assume_role_policy.md @@ -55,4 +55,4 @@ resource "databricks_mws_credentials" "this" { In addition to all arguments above, the following attributes are exported: -* `json` - AWS IAM Policy JSON document \ No newline at end of file +* `json` - AWS IAM Policy JSON document diff --git a/docs/data-sources/aws_bucket_policy.md b/docs/data-sources/aws_bucket_policy.md index 3558d56068..7bb18d8f51 100644 --- a/docs/data-sources/aws_bucket_policy.md +++ b/docs/data-sources/aws_bucket_policy.md @@ -9,8 +9,8 @@ This datasource configures a simple access policy for AWS S3 buckets, so that Da ```hcl resource "aws_s3_bucket" "this" { - bucket = "" - acl = "private" + bucket = "" + acl = "private" force_destroy = true } @@ -19,8 +19,8 @@ data "databricks_aws_bucket_policy" "stuff" { } resource "aws_s3_bucket_policy" "this" { - bucket = aws_s3_bucket.this.id - policy = data.databricks_aws_bucket_policy.this.json + bucket = aws_s3_bucket.this.id + policy = data.databricks_aws_bucket_policy.this.json } ``` diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index fde9c0eeec..fe4aa7c8d0 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -13,7 +13,7 @@ Retrieve all clusters on this workspace on AWS or GCP: ```hcl data "databricks_clusters" "all" { - depends_on = [databricks_mws_workspaces.this] + depends_on = [databricks_mws_workspaces.this] } ``` @@ -21,8 +21,8 @@ Retrieve all clusters with "Shared" in their cluster name on this Azure Databric ```hcl data "databricks_clusters" "all_shared" { - depends_on = [azurerm_databricks_workspace.this] - cluster_name_contains = "shared" + depends_on = [azurerm_databricks_workspace.this] + cluster_name_contains = "shared" } ``` diff --git a/docs/data-sources/dbfs_file.md b/docs/data-sources/dbfs_file.md index 2d0e5c635c..f59fcf8aca 100644 --- a/docs/data-sources/dbfs_file.md +++ b/docs/data-sources/dbfs_file.md @@ -11,8 +11,8 @@ This data source allows to get file content from DBFS ```hcl data "databricks_dbfs_file" "report" { - path = "dbfs:/reports/some.csv" - limit_file_size = 10240 + path = "dbfs:/reports/some.csv" + limit_file_size = 10240 } ``` ## Argument Reference @@ -25,4 +25,4 @@ data "databricks_dbfs_file" "report" { This data source exports the following attributes: * `content` - base64-encoded file contents -* `file_size` - size of the file in bytes \ No newline at end of file +* `file_size` - size of the file in bytes diff --git 
a/docs/data-sources/dbfs_file_paths.md b/docs/data-sources/dbfs_file_paths.md index 1d82a5c14a..3fa3ab7c59 100644 --- a/docs/data-sources/dbfs_file_paths.md +++ b/docs/data-sources/dbfs_file_paths.md @@ -11,8 +11,8 @@ This data source allows to get list of file names from DBFS ```hcl data "databricks_dbfs_file_paths" "partitions" { - path = "dbfs:/user/hive/default.db/table" - recursive = false + path = "dbfs:/user/hive/default.db/table" + recursive = false } ``` ## Argument Reference @@ -24,4 +24,4 @@ data "databricks_dbfs_file_paths" "partitions" { This data source exports the following attributes: -* `path_list` - returns list of objects with `path` and `file_size` attributes in each \ No newline at end of file +* `path_list` - returns list of objects with `path` and `file_size` attributes in each diff --git a/docs/data-sources/group.md b/docs/data-sources/group.md index 8fff4f3ffb..6113495a21 100644 --- a/docs/data-sources/group.md +++ b/docs/data-sources/group.md @@ -13,15 +13,15 @@ Adding user to administrative group ```hcl data "databricks_group" "admins" { - display_name = "admins" + display_name = "admins" } resource "databricks_user" "me" { - user_name = "me@example.com" + user_name = "me@example.com" } resource "databricks_group_member" "my_member_a" { - group_id = data.databricks_group.admins.id + group_id = data.databricks_group.admins.id member_id = databricks_user.me.id } ``` diff --git a/docs/data-sources/node_type.md b/docs/data-sources/node_type.md index e73a4ef4e2..98c5355c94 100644 --- a/docs/data-sources/node_type.md +++ b/docs/data-sources/node_type.md @@ -13,26 +13,26 @@ Gets the smallest node type for [databricks_cluster](../resources/cluster.md) th ```hcl data "databricks_node_type" "with_gpu" { - local_disk = true - min_cores = 16 - gb_per_core = 1 - min_gpus = 1 + local_disk = true + min_cores = 16 + gb_per_core = 1 + min_gpus = 1 } data "databricks_spark_version" "gpu_ml" { gpu = true - ml = true + ml = true } resource "databricks_cluster" "research" { - cluster_name = "Research Cluster" - spark_version = data.databricks_spark_version.gpu_ml.id - node_type_id = data.databricks_node_type.with_gpu.id - autotermination_minutes = 20 - autoscale { - min_workers = 1 - max_workers = 50 - } + cluster_name = "Research Cluster" + spark_version = data.databricks_spark_version.gpu_ml.id + node_type_id = data.databricks_node_type.with_gpu.id + autotermination_minutes = 20 + autoscale { + min_workers = 1 + max_workers = 50 + } } ``` diff --git a/docs/data-sources/notebook.md b/docs/data-sources/notebook.md index bf37d0d9a0..506463b408 100644 --- a/docs/data-sources/notebook.md +++ b/docs/data-sources/notebook.md @@ -11,8 +11,8 @@ This data source allows to export a notebook from workspace ```hcl data "databricks_notebook" "features" { - path = "/Production/Features" - format = "SOURCE" + path = "/Production/Features" + format = "SOURCE" } ``` @@ -28,4 +28,4 @@ This data source exports the following attributes: * `content` - notebook content in selected format * `language` - notebook language * `object_id` - notebook object ID -* `object_type` - notebook object type \ No newline at end of file +* `object_type` - notebook object type diff --git a/docs/data-sources/notebook_paths.md b/docs/data-sources/notebook_paths.md index 63c8316d83..e8e52680c7 100644 --- a/docs/data-sources/notebook_paths.md +++ b/docs/data-sources/notebook_paths.md @@ -11,8 +11,8 @@ This data source allows to list notebooks in the workspace ```hcl data "databricks_notebook_paths" "prod" { - path = 
"/Production" - recursive = true + path = "/Production" + recursive = true } ``` diff --git a/docs/data-sources/spark_version.md b/docs/data-sources/spark_version.md index 3b6206c717..18d794970f 100644 --- a/docs/data-sources/spark_version.md +++ b/docs/data-sources/spark_version.md @@ -13,26 +13,26 @@ Gets Databricks Runtime (DBR) version that could be used for `spark_version` par ```hcl data "databricks_node_type" "with_gpu" { - local_disk = true - min_cores = 16 - gb_per_core = 1 - min_gpus = 1 + local_disk = true + min_cores = 16 + gb_per_core = 1 + min_gpus = 1 } data "databricks_spark_version" "gpu_ml" { gpu = true - ml = true + ml = true } resource "databricks_cluster" "research" { - cluster_name = "Research Cluster" - spark_version = data.databricks_spark_version.gpu_ml.id - node_type_id = data.databricks_node_type.with_gpu.id - autotermination_minutes = 20 - autoscale { - min_workers = 1 - max_workers = 50 - } + cluster_name = "Research Cluster" + spark_version = data.databricks_spark_version.gpu_ml.id + node_type_id = data.databricks_node_type.with_gpu.id + autotermination_minutes = 20 + autoscale { + min_workers = 1 + max_workers = 50 + } } ``` diff --git a/docs/data-sources/user.md b/docs/data-sources/user.md index f1349ab808..0ccceaa38f 100644 --- a/docs/data-sources/user.md +++ b/docs/data-sources/user.md @@ -14,15 +14,15 @@ Adding user to administrative group ```hcl data "databricks_group" "admins" { - display_name = "admins" + display_name = "admins" } data "databricks_user" "me" { - user_name = "me@example.com" + user_name = "me@example.com" } resource "databricks_group_member" "my_member_a" { - group_id = data.databricks_group.admins.id + group_id = data.databricks_group.admins.id member_id = data.databricks_user.me.id } ``` diff --git a/docs/guides/aws-e2-firewall-hub-and-spoke.md b/docs/guides/aws-e2-firewall-hub-and-spoke.md index 11e15dd50b..a34009f16e 100644 --- a/docs/guides/aws-e2-firewall-hub-and-spoke.md +++ b/docs/guides/aws-e2-firewall-hub-and-spoke.md @@ -61,17 +61,17 @@ variable "prefix" { } locals { - prefix = "${var.prefix}${random_string.naming.result}" - spoke_db_private_subnets_cidr = [cidrsubnet(var.spoke_cidr_block, 3, 0), cidrsubnet(var.spoke_cidr_block, 3, 1)] - spoke_tgw_private_subnets_cidr = [cidrsubnet(var.spoke_cidr_block, 3, 2), cidrsubnet(var.spoke_cidr_block, 3, 3)] - hub_tgw_private_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 0)] - hub_nat_public_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 1)] - hub_firewall_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 2)] - sg_egress_ports = [443, 3306, 6666] - sg_ingress_protocol = ["tcp", "udp"] - sg_egress_protocol = ["tcp", "udp"] - availability_zones = ["${var.region}a", "${var.region}b"] - db_root_bucket = "${var.prefix}${random_string.naming.result}-rootbucket.s3.amazonaws.com" + prefix = "${var.prefix}${random_string.naming.result}" + spoke_db_private_subnets_cidr = [cidrsubnet(var.spoke_cidr_block, 3, 0), cidrsubnet(var.spoke_cidr_block, 3, 1)] + spoke_tgw_private_subnets_cidr = [cidrsubnet(var.spoke_cidr_block, 3, 2), cidrsubnet(var.spoke_cidr_block, 3, 3)] + hub_tgw_private_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 0)] + hub_nat_public_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 1)] + hub_firewall_subnets_cidr = [cidrsubnet(var.hub_cidr_block, 3, 2)] + sg_egress_ports = [443, 3306, 6666] + sg_ingress_protocol = ["tcp", "udp"] + sg_egress_protocol = ["tcp", "udp"] + availability_zones = ["${var.region}a", "${var.region}b"] + db_root_bucket = 
"${var.prefix}${random_string.naming.result}-rootbucket.s3.amazonaws.com" } ``` @@ -94,9 +94,9 @@ terraform { version = "0.4.1" } aws = { - source = "hashicorp/aws" + source = "hashicorp/aws" version = "3.49.0" - } + } } } @@ -269,12 +269,12 @@ module "vpc_endpoints" { endpoints = { s3 = { - service = "s3" - service_type = "Gateway" + service = "s3" + service_type = "Gateway" route_table_ids = flatten([ aws_route_table.spoke_db_private_rt.id ]) - tags = { + tags = { Name = "${local.prefix}-s3-vpc-endpoint" } }, @@ -348,7 +348,7 @@ resource "aws_subnet" "hub_firewall_subnet" { cidr_block = element(local.hub_firewall_subnets_cidr, count.index) availability_zone = element(local.availability_zones, count.index) map_public_ip_on_launch = false - tags = merge(var.tags, { + tags = merge(var.tags, { Name = "${local.prefix}-hub-firewall-public-${element(local.availability_zones, count.index)}" }) } @@ -486,8 +486,8 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "hub" { transit_gateway_default_route_table_association = true transit_gateway_default_route_table_propagation = true tags = merge(var.tags, { - Name = "${local.prefix}-hub" - Purpose = "Transit Gateway Attachment - Hub VPC" + Name = "${local.prefix}-hub" + Purpose = "Transit Gateway Attachment - Hub VPC" }) } @@ -501,8 +501,8 @@ resource "aws_ec2_transit_gateway_vpc_attachment" "spoke" { transit_gateway_default_route_table_association = true transit_gateway_default_route_table_propagation = true tags = merge(var.tags, { - Name = "${local.prefix}-spoke" - Purpose = "Transit Gateway Attachment - Spoke VPC" + Name = "${local.prefix}-spoke" + Purpose = "Transit Gateway Attachment - Spoke VPC" }) } @@ -559,14 +559,14 @@ resource "aws_networkfirewall_rule_group" "databricks_fqdns_rg" { rules_source_list { generated_rules_type = "ALLOWLIST" target_types = ["TLS_SNI", "HTTP_HOST"] - targets = concat([var.db_web_app, var.db_tunnel, var.db_rds,local.db_root_bucket], var.whitelisted_urls) + targets = concat([var.db_web_app, var.db_tunnel, var.db_rds, local.db_root_bucket], var.whitelisted_urls) } } rule_variables { ip_sets { key = "HOME_NET" ip_set { - definition = [var.spoke_cidr_block,var.hub_cidr_block] + definition = [var.spoke_cidr_block, var.hub_cidr_block] } } } @@ -594,7 +594,7 @@ resource "aws_networkfirewall_rule_group" "allow_db_cpl_protocols_rg" { ip_sets { key = "HOME_NET" ip_set { - definition = [var.spoke_cidr_block,var.hub_cidr_block] + definition = [var.spoke_cidr_block, var.hub_cidr_block] } } } @@ -636,7 +636,7 @@ resource "aws_networkfirewall_rule_group" "deny_protocols_rg" { ip_sets { key = "HOME_NET" ip_set { - definition = [var.spoke_cidr_block,var.hub_cidr_block] + definition = [var.spoke_cidr_block, var.hub_cidr_block] } } } @@ -738,4 +738,4 @@ resource "aws_route" "db_igw_nat_firewall" { ``` ## Troubleshooting -If the Databricks clusters cannot reach DBFS or VPC endpoints do not work as intended, for example if your data sources are inaccessible or if the traffic is bypassing the endpoints please visit [Troubleshoot regional endpoints](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#troubleshoot-regional-endpoints) \ No newline at end of file +If the Databricks clusters cannot reach DBFS or VPC endpoints do not work as intended, for example if your data sources are inaccessible or if the traffic is bypassing the endpoints please visit [Troubleshoot regional 
endpoints](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#troubleshoot-regional-endpoints) diff --git a/docs/guides/aws-e2-firewall-workspace.md b/docs/guides/aws-e2-firewall-workspace.md index a98c1d6ce7..cfc587e6fd 100644 --- a/docs/guides/aws-e2-firewall-workspace.md +++ b/docs/guides/aws-e2-firewall-workspace.md @@ -92,9 +92,9 @@ terraform { version = "0.4.1" } aws = { - source = "hashicorp/aws" + source = "hashicorp/aws" version = "3.49.0" - } + } } } @@ -351,12 +351,12 @@ module "vpc_endpoints" { endpoints = { s3 = { - service = "s3" - service_type = "Gateway" + service = "s3" + service_type = "Gateway" route_table_ids = flatten([ aws_route_table.db_private_rt.id ]) - tags = { + tags = { Name = "${local.prefix}-s3-vpc-endpoint" } }, @@ -401,7 +401,7 @@ resource "aws_networkfirewall_rule_group" "databricks_fqdns_rg" { rules_source_list { generated_rules_type = "ALLOWLIST" target_types = ["TLS_SNI", "HTTP_HOST"] - targets = concat([var.db_web_app, var.db_tunnel, var.db_rds,local.db_root_bucket], var.whitelisted_urls) + targets = concat([var.db_web_app, var.db_tunnel, var.db_rds, local.db_root_bucket], var.whitelisted_urls) } } rule_variables { @@ -578,4 +578,4 @@ resource "aws_route" "db_igw_nat_firewall" { ## Troubleshooting -If the Databricks clusters cannot reach DBFS or VPC endpoints do not work as intended, for example if your data sources are inaccessible or if the traffic is bypassing the endpoints please visit [Troubleshoot regional endpoints](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#troubleshoot-regional-endpoints) \ No newline at end of file +If the Databricks clusters cannot reach DBFS or VPC endpoints do not work as intended, for example if your data sources are inaccessible or if the traffic is bypassing the endpoints please visit [Troubleshoot regional endpoints](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#troubleshoot-regional-endpoints) diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 2bd1395e31..063043ed88 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -57,7 +57,7 @@ terraform { version = "0.4.1" } aws = { - source = "hashicorp/aws" + source = "hashicorp/aws" version = "3.49.0" } } @@ -131,10 +131,10 @@ module "vpc" { create_igw = true private_subnets = [cidrsubnet(var.cidr_block, 3, 1), - cidrsubnet(var.cidr_block, 3, 2)] + cidrsubnet(var.cidr_block, 3, 2)] manage_default_security_group = true - default_security_group_name = "${local.prefix}-sg" + default_security_group_name = "${local.prefix}-sg" default_security_group_egress = [{ cidr_blocks = "0.0.0.0/0" @@ -147,7 +147,7 @@ module "vpc" { } module "vpc_endpoints" { - source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints" + source = "terraform-aws-modules/vpc/aws//modules/vpc-endpoints" version = "3.2.0" vpc_id = module.vpc.vpc_id @@ -155,12 +155,12 @@ module "vpc_endpoints" { endpoints = { s3 = { - service = "s3" - service_type = "Gateway" + service = "s3" + service_type = "Gateway" route_table_ids = flatten([ module.vpc.private_route_table_ids, - module.vpc.public_route_table_ids]) - tags = { + module.vpc.public_route_table_ids]) + tags = { Name = "${local.prefix}-s3-vpc-endpoint" } }, @@ -168,7 +168,7 @@ module "vpc_endpoints" { service = "sts" private_dns_enabled = true subnet_ids = module.vpc.private_subnets - tags = { + tags = { Name = "${local.prefix}-sts-vpc-endpoint" } }, 
@@ -176,7 +176,7 @@ module "vpc_endpoints" { service = "kinesis-streams" private_dns_enabled = true subnet_ids = module.vpc.private_subnets - tags = { + tags = { Name = "${local.prefix}-kinesis-vpc-endpoint" } }, @@ -287,7 +287,7 @@ In [the next step](workspace-management.md), please use the following configurat ```hcl provider "databricks" { - host = module.ai.databricks_host + host = module.ai.databricks_host token = module.ai.databricks_token } ``` @@ -306,7 +306,7 @@ As a workaround give the `aws_iam_role` more time to be created with a `time_sle ```hcl resource "time_sleep" "wait" { depends_on = [ - aws_iam_role.cross_account_role] + aws_iam_role.cross_account_role] create_duration = "10s" } ``` diff --git a/docs/guides/azure-workspace.md b/docs/guides/azure-workspace.md index a73cf6c027..c5aa07f0d9 100644 --- a/docs/guides/azure-workspace.md +++ b/docs/guides/azure-workspace.md @@ -11,8 +11,8 @@ The following sample configuration assumes you have authorized with `az login` o ```hcl terraform { required_providers { - azurerm = "~> 2.33" - random = "~> 2.2" + azurerm = "~> 2.33" + random = "~> 2.2" } } @@ -21,7 +21,7 @@ provider "azurerm" { } variable "region" { - type = string + type = string default = "westeurope" } @@ -84,4 +84,4 @@ In [the next step](workspace-management.md), please use the [special configurati provider "databricks" { host = azurerm_databricks_workspace.this.workspace_url } -``` \ No newline at end of file +``` diff --git a/docs/guides/migration-0.3.x.md b/docs/guides/migration-0.3.x.md index 735b44ed33..ed22926bdf 100644 --- a/docs/guides/migration-0.3.x.md +++ b/docs/guides/migration-0.3.x.md @@ -65,4 +65,4 @@ Notebook on Databricks workspace would only be changed, if Terraform stage did c ## databricks_default_user_roles -* This data source was removed as deprecated. Please use [databricks_group](../data-sources/group.md) data source for performing equivalent tasks. \ No newline at end of file +* This data source was removed as deprecated. Please use [databricks_group](../data-sources/group.md) data source for performing equivalent tasks. 
diff --git a/docs/guides/migration-0.4.x.md b/docs/guides/migration-0.4.x.md index 98177ff899..77eabe3951 100644 --- a/docs/guides/migration-0.4.x.md +++ b/docs/guides/migration-0.4.x.md @@ -20,4 +20,4 @@ Certain resources undergone changes in order to improve long-term maintainabilit ## databricks_user and databricks_group -* Globally rename `allow_sql_analytics_access` to `databricks_sql_access` field to allow users and groups access to Databricks SQL \ No newline at end of file +* Globally rename `allow_sql_analytics_access` to `databricks_sql_access` field to allow users and groups access to Databricks SQL diff --git a/docs/guides/passthrough-cluster-per-user.md b/docs/guides/passthrough-cluster-per-user.md index 649c2a9593..47e41a222d 100644 --- a/docs/guides/passthrough-cluster-per-user.md +++ b/docs/guides/passthrough-cluster-per-user.md @@ -13,7 +13,7 @@ data "databricks_group" "dev" { data "databricks_user" "dev" { for_each = data.databricks_group.dev.members - user_id = each.key + user_id = each.key } ``` @@ -28,8 +28,8 @@ data "databricks_node_type" "smallest" { resource "databricks_cluster" "dev" { for_each = data.databricks_user.dev - cluster_name = "${each.value.display_name} dev cluster" - single_user_name = each.value.user_name + cluster_name = "${each.value.display_name} dev cluster" + single_user_name = each.value.user_name spark_version = data.databricks_spark_version.latest.id node_type_id = data.databricks_node_type.smallest.id @@ -41,7 +41,7 @@ resource "databricks_cluster" "dev" { "spark.master" : "local[*]", # Passthrough - "spark.databricks.passthrough.enabled": "true" + "spark.databricks.passthrough.enabled" : "true" } custom_tags = { @@ -50,11 +50,11 @@ resource "databricks_cluster" "dev" { } resource "databricks_permissions" "dev_restart" { - for_each = data.databricks_user.dev + for_each = data.databricks_user.dev cluster_id = databricks_cluster.dev[each.key].cluster_id access_control { - user_name = each.value.user_name + user_name = each.value.user_name permission_level = "CAN_RESTART" } } -``` \ No newline at end of file +``` diff --git a/docs/guides/workspace-management.md b/docs/guides/workspace-management.md index c720eae3be..2965c9d3bb 100644 --- a/docs/guides/workspace-management.md +++ b/docs/guides/workspace-management.md @@ -70,7 +70,7 @@ resource "databricks_job" "this" { } resource "databricks_cluster" "this" { - cluster_name = "Exploration (${data.databricks_current_user.me.alphanumeric})" + cluster_name = "Exploration (${data.databricks_current_user.me.alphanumeric})" spark_version = data.databricks_spark_version.latest.id instance_pool_id = databricks_instance_pool.smallest_nodes.id autotermination_minutes = 20 @@ -214,14 +214,14 @@ data "http" "my" { resource "databricks_workspace_conf" "this" { custom_config = { - "enableIpAccessLists": true + "enableIpAccessLists" : true } } resource "databricks_ip_access_list" "only_me" { - label = "only ${data.http.my.body} is allowed to access workspace" - list_type = "ALLOW" + label = "only ${data.http.my.body} is allowed to access workspace" + list_type = "ALLOW" ip_addresses = ["${data.http.my.body}/32"] - depends_on = [databricks_workspace_conf.this] + depends_on = [databricks_workspace_conf.this] } ``` diff --git a/docs/index.md b/docs/index.md index a6d4f4b321..84f3e8b308 100644 --- a/docs/index.md +++ b/docs/index.md @@ -188,9 +188,9 @@ Since v0.3.8, it's possible to leverage [Azure Managed Service Identity](https:/ ```hcl provider "databricks" { host = 
data.azurerm_databricks_workspace.this.workspace_url - + # ARM_USE_MSI environment variable is recommended - azure_use_msi = true + azure_use_msi = true } ``` @@ -204,10 +204,10 @@ provider "azurerm" { } resource "azurerm_databricks_workspace" "this" { - location = "centralus" - name = "my-workspace-name" - resource_group_name = var.resource_group - sku = "premium" + location = "centralus" + name = "my-workspace-name" + resource_group_name = var.resource_group + sku = "premium" } provider "databricks" { @@ -215,8 +215,8 @@ provider "databricks" { } resource "databricks_user" "my-user" { - user_name = "test-user@databricks.com" - display_name = "Test User" + user_name = "test-user@databricks.com" + display_name = "Test User" } ``` @@ -224,17 +224,17 @@ resource "databricks_user" "my-user" { ```hcl provider "azurerm" { - client_id = var.client_id - client_secret = var.client_secret - tenant_id = var.tenant_id - subscription_id = var.subscription_id + client_id = var.client_id + client_secret = var.client_secret + tenant_id = var.tenant_id + subscription_id = var.subscription_id } resource "azurerm_databricks_workspace" "this" { - location = "centralus" - name = "my-workspace-name" - resource_group_name = var.resource_group - sku = "premium" + location = "centralus" + name = "my-workspace-name" + resource_group_name = var.resource_group + sku = "premium" } provider "databricks" { @@ -334,7 +334,7 @@ If you notice below error, it might be due to the fact that [required_providers] terraform { required_providers { databricks = { - source = "databrickslabs/databricks" + source = "databrickslabs/databricks" version = "0.4.1" } } diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index 855c1681b2..c5c80fb66a 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -67,9 +67,9 @@ resource "databricks_cluster" "shared_autoscaling" { max_workers = 50 } spark_conf = { - "spark.databricks.io.cache.enabled": true, - "spark.databricks.io.cache.maxDiskUsage": "50g", - "spark.databricks.io.cache.maxMetaDataCache": "1g" + "spark.databricks.io.cache.enabled" : true, + "spark.databricks.io.cache.maxDiskUsage" : "50g", + "spark.databricks.io.cache.maxMetaDataCache" : "1g" } } ``` @@ -222,7 +222,7 @@ Example of pushing all cluster logs to S3: cluster_log_conf { s3 { destination = "s3a://acmecorp-main/cluster-logs" - region = "us-east-1" + region = "us-east-1" } } ``` @@ -255,7 +255,7 @@ Example of taking init script from S3: init_scripts { s3 { destination = "s3a://acmecorp-main/init-scripts/install-elk.sh" - region = "us-east-1" + region = "us-east-1" } } ``` @@ -289,10 +289,10 @@ resource "databricks_cluster" "this" { max_workers = 50 } aws_attributes { - availability = "SPOT" - zone_id = "us-east-1" - first_on_demand = 1 - spot_bid_price_percent = 100 + availability = "SPOT" + zone_id = "us-east-1" + first_on_demand = 1 + spot_bid_price_percent = 100 } } ``` diff --git a/docs/resources/cluster_policy.md b/docs/resources/cluster_policy.md index 801a4fa38b..35322248f0 100644 --- a/docs/resources/cluster_policy.md +++ b/docs/resources/cluster_policy.md @@ -26,42 +26,42 @@ Let us take a look at an example of how you can manage two teams: Marketing and ```hcl variable "team" { - description = "Team that performs the work" + description = "Team that performs the work" } variable "policy_overrides" { - description = "Cluster policy overrides" + description = "Cluster policy overrides" } locals { - default_policy = { - "dbus_per_hour" : { - "type" : "range", - "maxValue" : 10 - }, - 
"autotermination_minutes": { - "type": "fixed", - "value": 20, - "hidden": true - }, - "custom_tags.Team" : { - "type" : "fixed", - "value" : var.team - } + default_policy = { + "dbus_per_hour" : { + "type" : "range", + "maxValue" : 10 + }, + "autotermination_minutes" : { + "type" : "fixed", + "value" : 20, + "hidden" : true + }, + "custom_tags.Team" : { + "type" : "fixed", + "value" : var.team } + } } resource "databricks_cluster_policy" "fair_use" { - name = "${var.team} cluster policy" - definition = jsonencode(merge(local.default_policy, var.policy_overrides)) + name = "${var.team} cluster policy" + definition = jsonencode(merge(local.default_policy, var.policy_overrides)) } resource "databricks_permissions" "can_use_cluster_policyinstance_profile" { - cluster_policy_id = databricks_cluster_policy.fair_use.id - access_control { - group_name = var.team - permission_level = "CAN_USE" - } + cluster_policy_id = databricks_cluster_policy.fair_use.id + access_control { + group_name = var.team + permission_level = "CAN_USE" + } } ``` @@ -69,27 +69,27 @@ And custom instances of that base policy module for our marketing and data engin ```hcl module "marketing_compute_policy" { - source = "../modules/databricks-cluster-policy" - team = "marketing" - policy_overrides = { - // only marketing guys will benefit from delta cache this way - "spark_conf.spark.databricks.io.cache.enabled": { - "type": "fixed", - "value": "true" - }, - } + source = "../modules/databricks-cluster-policy" + team = "marketing" + policy_overrides = { + // only marketing guys will benefit from delta cache this way + "spark_conf.spark.databricks.io.cache.enabled" : { + "type" : "fixed", + "value" : "true" + }, + } } module "engineering_compute_policy" { - source = "../modules/databricks-cluster-policy" - team = "engineering" - policy_overrides = { - "dbus_per_hour" : { - "type" : "range", - // only engineering guys can spin up big clusters - "maxValue" : 50 - }, - } + source = "../modules/databricks-cluster-policy" + team = "engineering" + policy_overrides = { + "dbus_per_hour" : { + "type" : "range", + // only engineering guys can spin up big clusters + "maxValue" : 50 + }, + } } ``` @@ -113,4 +113,4 @@ The resource cluster policy can be imported using the policy id: ```bash $ terraform import databricks_cluster_policy.this -``` \ No newline at end of file +``` diff --git a/docs/resources/dbfs_file.md b/docs/resources/dbfs_file.md index 1b99114865..9348635857 100644 --- a/docs/resources/dbfs_file.md +++ b/docs/resources/dbfs_file.md @@ -12,7 +12,7 @@ In order to manage file on Databricks File System with Terraform, you must speci ```hcl resource "databricks_dbfs_file" "this" { source = "${path.module}/main.tf" - path = "/tmp/main.tf" + path = "/tmp/main.tf" } ``` @@ -37,13 +37,13 @@ data "databricks_clusters" "all" { resource "databricks_dbfs_file" "app" { source = "${path.module}/baz.whl" - path = "/FileStore/baz.whl" + path = "/FileStore/baz.whl" } resource "databricks_library" "app" { - for_each = data.databricks_clusters.all.ids + for_each = data.databricks_clusters.all.ids cluster_id = each.key - whl = databricks_dbfs_file.app.dbfs_path + whl = databricks_dbfs_file.app.dbfs_path } ``` diff --git a/docs/resources/global_init_script.md b/docs/resources/global_init_script.md index e6073f8f77..b99fca7a24 100644 --- a/docs/resources/global_init_script.md +++ b/docs/resources/global_init_script.md @@ -12,7 +12,7 @@ You can declare Terraform-managed global init script by specifying `source` attr ```hcl resource 
"databricks_global_init_script" "init1" { source = "${path.module}/init.sh" - name = "my init script" + name = "my init script" } ``` diff --git a/docs/resources/group.md b/docs/resources/group.md index 11d9971f1f..17acd19303 100644 --- a/docs/resources/group.md +++ b/docs/resources/group.md @@ -17,8 +17,8 @@ Creating some group ```hcl resource "databricks_group" "this" { - display_name = "Some Group" - allow_cluster_create = true + display_name = "Some Group" + allow_cluster_create = true allow_instance_pool_create = true } ``` @@ -27,18 +27,18 @@ Adding [databricks_user](user.md) as [databricks_group_member](group_member.md) ```hcl resource "databricks_group" "this" { - display_name = "Some Group" - allow_cluster_create = true + display_name = "Some Group" + allow_cluster_create = true allow_instance_pool_create = true } resource "databricks_user" "this" { - user_name = "someone@example.com" + user_name = "someone@example.com" } resource "databricks_group_member" "vip_member" { - group_id = databricks_group.this.id - member_id = databricks_user.this.id + group_id = databricks_group.this.id + member_id = databricks_user.this.id } ``` diff --git a/docs/resources/group_instance_profile.md b/docs/resources/group_instance_profile.md index 84e8da9bde..be303b6a17 100644 --- a/docs/resources/group_instance_profile.md +++ b/docs/resources/group_instance_profile.md @@ -11,16 +11,16 @@ This resource allows you to attach instance profiles to groups created by the [g ```hcl resource "databricks_instance_profile" "instance_profile" { - instance_profile_arn = "my_instance_profile_arn" + instance_profile_arn = "my_instance_profile_arn" } resource "databricks_group" "my_group" { - display_name = "my_group_name" + display_name = "my_group_name" } resource "databricks_group_instance_profile" "my_group_instance_profile" { - group_id = databricks_group.my_group.id - instance_profile_id = databricks_instance_profile.instance_profile.id + group_id = databricks_group.my_group.id + instance_profile_id = databricks_instance_profile.instance_profile.id } ``` ## Argument Reference diff --git a/docs/resources/group_member.md b/docs/resources/group_member.md index b9f1e23e36..e0e04d7eca 100644 --- a/docs/resources/group_member.md +++ b/docs/resources/group_member.md @@ -11,25 +11,25 @@ After the following example, Bradley would have direct membership in group B and ```hcl resource "databricks_group" "a" { - display_name = "A" + display_name = "A" } resource "databricks_group" "b" { - display_name = "B" + display_name = "B" } resource "databricks_group_member" "ab" { - group_id = databricks_group.a.id - member_id = databricks_group.b.id + group_id = databricks_group.a.id + member_id = databricks_group.b.id } resource "databricks_user" "bradley" { - user_name = "bradley@example.com" + user_name = "bradley@example.com" } resource "databricks_group_member" "bb" { - group_id = databricks_group.b.id - member_id = databricks_user.bradley.id + group_id = databricks_group.b.id + member_id = databricks_user.bradley.id } ``` diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md index beb5ded73e..1465a70ea0 100644 --- a/docs/resources/instance_pool.md +++ b/docs/resources/instance_pool.md @@ -19,8 +19,8 @@ resource "databricks_instance_pool" "smallest_nodes" { max_capacity = 300 node_type_id = data.databricks_node_type.smallest.id aws_attributes { - availability = "ON_DEMAND" - zone_id = "us-east-1a" + availability = "ON_DEMAND" + zone_id = "us-east-1a" spot_bid_price_percent = "100" } 
idle_instance_autotermination_minutes = 10 @@ -28,7 +28,7 @@ resource "databricks_instance_pool" "smallest_nodes" { disk_type { ebs_volume_type = "GENERAL_PURPOSE_SSD" } - disk_size = 80 + disk_size = 80 disk_count = 1 } } diff --git a/docs/resources/instance_profile.md b/docs/resources/instance_profile.md index 06dce3a255..bc093bb6a7 100644 --- a/docs/resources/instance_profile.md +++ b/docs/resources/instance_profile.md @@ -58,11 +58,11 @@ resource "databricks_cluster" "this" { max_workers = 50 } aws_attributes { - instance_profile_arn = databricks_instance_profile.shared.id - availability = "SPOT" - zone_id = "us-east-1" - first_on_demand = 1 - spot_bid_price_percent = 100 + instance_profile_arn = databricks_instance_profile.shared.id + availability = "SPOT" + zone_id = "us-east-1" + first_on_demand = 1 + spot_bid_price_percent = 100 } } ``` @@ -76,9 +76,9 @@ resource "databricks_cluster_policy" "this" { name = "Policy with predefined instance profile" definition = jsonencode({ # most likely policy might have way more things init. - "aws_attributes.instance_profile_arn": { - "type": "fixed", - "value": databricks_instance_profile.shared.arn + "aws_attributes.instance_profile_arn" : { + "type" : "fixed", + "value" : databricks_instance_profile.shared.arn } }) } diff --git a/docs/resources/ip_access_list.md b/docs/resources/ip_access_list.md index 54208d466a..9f2a065d50 100644 --- a/docs/resources/ip_access_list.md +++ b/docs/resources/ip_access_list.md @@ -12,12 +12,12 @@ Security-conscious enterprises that use cloud SaaS applications need to restrict ```hcl resource "databricks_workspace_conf" "this" { custom_config = { - "enableIpAccessLists": true + "enableIpAccessLists" : true } } resource "databricks_ip_access_list" "allowed-list" { - label = "allow_in" + label = "allow_in" list_type = "ALLOW" ip_addresses = [ "1.2.3.0/24", @@ -47,4 +47,4 @@ The databricks_ip_access_list can be imported using id: ```bash $ terraform import databricks_ip_access_list.this -``` \ No newline at end of file +``` diff --git a/docs/resources/library.md b/docs/resources/library.md index 63f870eab3..36863319d3 100644 --- a/docs/resources/library.md +++ b/docs/resources/library.md @@ -16,7 +16,7 @@ data "databricks_clusters" "all" { } resource "databricks_library" "cli" { - for_each = data.databricks_clusters.all.ids + for_each = data.databricks_clusters.all.ids cluster_id = each.key pypi { package = "databricks-cli" @@ -29,12 +29,12 @@ resource "databricks_library" "cli" { ```hcl resource "databricks_dbfs_file" "app" { source = "${path.module}/app-0.0.1.jar" - path = "/FileStore/app-0.0.1.jar" + path = "/FileStore/app-0.0.1.jar" } resource "databricks_library" "app" { cluster_id = databricks_cluster.this.id - jar = databricks_dbfs_file.app.dbfs_path + jar = databricks_dbfs_file.app.dbfs_path } ``` @@ -58,12 +58,12 @@ resource "databricks_library" "deequ" { ```hcl resource "databricks_dbfs_file" "app" { source = "${path.module}/baz.whl" - path = "/FileStore/baz.whl" + path = "/FileStore/baz.whl" } resource "databricks_library" "app" { cluster_id = databricks_cluster.this.id - whl = databricks_dbfs_file.app.dbfs_path + whl = databricks_dbfs_file.app.dbfs_path } ``` @@ -88,12 +88,12 @@ resource "databricks_library" "fbprophet" { ```hcl resource "databricks_dbfs_file" "app" { source = "${path.module}/foo.egg" - path = "/FileStore/foo.egg" + path = "/FileStore/foo.egg" } resource "databricks_library" "app" { cluster_id = databricks_cluster.this.id - egg = databricks_dbfs_file.app.dbfs_path + egg = 
databricks_dbfs_file.app.dbfs_path } ``` @@ -108,4 +108,4 @@ resource "databricks_library" "rkeops" { package = "rkeops" } } -``` \ No newline at end of file +``` diff --git a/docs/resources/mlflow_experiment.md b/docs/resources/mlflow_experiment.md index f2205ab191..b5436d0621 100644 --- a/docs/resources/mlflow_experiment.md +++ b/docs/resources/mlflow_experiment.md @@ -9,9 +9,9 @@ This resource allows you to create MLflow experiments in Databricks. ```hcl resource "databricks_mlflow_experiment" "test" { - name = "/Users/myuserid/my-experiment" + name = "/Users/myuserid/my-experiment" artifact_location = "dbfs:/tmp/my-experiment" - description = "My MLflow experiment description" + description = "My MLflow experiment description" } ``` diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md index b7c881cca7..990fa38792 100644 --- a/docs/resources/mlflow_model.md +++ b/docs/resources/mlflow_model.md @@ -30,4 +30,4 @@ The following arguments are supported: * `name` - (Required) Name of MLflow model. * `description` - The description of the MLflow model. -* `tags` - Tags for the MLflow model. \ No newline at end of file +* `tags` - Tags for the MLflow model. diff --git a/docs/resources/mount.md b/docs/resources/mount.md index c3de7ded5f..bfd46ee7fa 100644 --- a/docs/resources/mount.md +++ b/docs/resources/mount.md @@ -31,12 +31,12 @@ This resource provides two ways of mounting a storage account: ```hcl locals { - tenant_id = "00000000-1111-2222-3333-444444444444" - client_id = "55555555-6666-7777-8888-999999999999" + tenant_id = "00000000-1111-2222-3333-444444444444" + client_id = "55555555-6666-7777-8888-999999999999" secret_scope = "some-kv" - secret_key = "some-sp-secret" - container = "test" - storage_acc = "lrs" + secret_key = "some-sp-secret" + container = "test" + storage_acc = "lrs" } resource "databricks_mount" "this" { @@ -44,12 +44,12 @@ resource "databricks_mount" "this" { uri = "abfss://${local.container}@${local.storage_acc}.dfs.core.windows.net" extra_configs = { - "fs.azure.account.auth.type": "OAuth", - "fs.azure.account.oauth.provider.type": "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider", - "fs.azure.account.oauth2.client.id": local.client_id, - "fs.azure.account.oauth2.client.secret": "{{secrets/${local.secret_scope}/${local.secret_key}}}", - "fs.azure.account.oauth2.client.endpoint": "https://login.microsoftonline.com/${local.tenant_id}/oauth2/token", - "fs.azure.createRemoteFileSystemDuringInitialization": "false", + "fs.azure.account.auth.type" : "OAuth", + "fs.azure.account.oauth.provider.type" : "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider", + "fs.azure.account.oauth2.client.id" : local.client_id, + "fs.azure.account.oauth2.client.secret" : "{{secrets/${local.secret_scope}/${local.secret_key}}}", + "fs.azure.account.oauth2.client.endpoint" : "https://login.microsoftonline.com/${local.tenant_id}/oauth2/token", + "fs.azure.createRemoteFileSystemDuringInitialization" : "false", } } ``` @@ -64,12 +64,12 @@ provider "azurerm" { } variable "resource_group" { - type = string + type = string description = "Resource group for Databricks Workspace" } variable "workspace_name" { - type = string + type = string description = "Name of the Databricks Workspace" } @@ -95,38 +95,38 @@ resource "databricks_cluster" "shared_passthrough" { spark_version = data.databricks_spark_version.latest.id node_type_id = data.databricks_node_type.smallest.id autotermination_minutes = 10 - num_workers = 1 - + num_workers = 1 + spark_conf = { - 
"spark.databricks.cluster.profile":"serverless", - "spark.databricks.repl.allowedLanguages":"python,sql", - "spark.databricks.passthrough.enabled": "true", - "spark.databricks.pyspark.enableProcessIsolation": "true" + "spark.databricks.cluster.profile" : "serverless", + "spark.databricks.repl.allowedLanguages" : "python,sql", + "spark.databricks.passthrough.enabled" : "true", + "spark.databricks.pyspark.enableProcessIsolation" : "true" } - + custom_tags = { - "ResourceClass": "Serverless" + "ResourceClass" : "Serverless" } } variable "storage_acc" { - type = string + type = string description = "Name of the ADLS Gen2 storage container" } variable "container" { - type = string + type = string description = "Name of container inside storage account" } resource "databricks_mount" "passthrough" { - name = "passthrough-test" + name = "passthrough-test" cluster_id = databricks_cluster.shared_passthrough.id - + uri = "abfss://${var.container}@${var.storage_acc}.dfs.core.windows.net" extra_configs = { - "fs.azure.account.auth.type": "CustomAccessToken", - "fs.azure.account.custom.token.provider.class": "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}", + "fs.azure.account.auth.type" : "CustomAccessToken", + "fs.azure.account.custom.token.provider.class" : "{{sparkconf/spark.databricks.passthrough.adls.gen2.tokenProviderClassName}}", } } ``` @@ -143,11 +143,11 @@ This block allows specifying parameters for mounting of the ADLS Gen2. The follo ```hcl // now you can do `%fs ls /mnt/experiments` in notebooks resource "databricks_mount" "this" { - name = "experiments" - s3 { - instance_profile = databricks_instance_profile.ds.id - bucket_name = aws_s3_bucket.this.bucket - } + name = "experiments" + s3 { + instance_profile = databricks_instance_profile.ds.id + bucket_name = aws_s3_bucket.this.bucket + } } ``` @@ -170,14 +170,14 @@ In this example, we're using Azure authentication, so we can omit some parameter ```hcl resource "databricks_secret_scope" "terraform" { - name = "application" - initial_manage_principal = "users" + name = "application" + initial_manage_principal = "users" } resource "databricks_secret" "service_principal_key" { - key = "service_principal_key" - string_value = "${var.ARM_CLIENT_SECRET}" - scope = databricks_secret_scope.terraform.name + key = "service_principal_key" + string_value = "${var.ARM_CLIENT_SECRET}" + scope = databricks_secret_scope.terraform.name } resource "azurerm_storage_account" "this" { @@ -203,14 +203,14 @@ resource "azurerm_storage_container" "this" { } resource "databricks_mount" "marketing" { - name = "marketing" - resource_id = azurerm_storage_container.this.id - abfs { - client_id = data.azurerm_client_config.current.client_id - client_secret_scope = databricks_secret_scope.terraform.name - client_secret_key = databricks_secret.service_principal_key.key - initialize_file_system = true - } + name = "marketing" + resource_id = azurerm_storage_container.this.id + abfs { + client_id = data.azurerm_client_config.current.client_id + client_secret_scope = databricks_secret_scope.terraform.name + client_secret_key = databricks_secret.service_principal_key.key + initialize_file_system = true + } } ``` @@ -228,7 +228,7 @@ resource "databricks_mount" "this_gs" { name = "gs-mount" gs { service_account = "acc@company.iam.gserviceaccount.com" - bucket_name = "mybucket" + bucket_name = "mybucket" } } ``` @@ -250,15 +250,15 @@ This block allows specifying parameters for mounting of the ADLS Gen1. 
The follo ```hcl resource "databricks_mount" "mount" { - name = "{var.RANDOM}" - adl { - storage_resource_name = "{env.TEST_STORAGE_ACCOUNT_NAME}" - tenant_id = data.azurerm_client_config.current.tenant_id - client_id = data.azurerm_client_config.current.client_id - client_secret_scope = databricks_secret_scope.terraform.name - client_secret_key = databricks_secret.service_principal_key.key - spark_conf_prefix = "fs.adl" - } + name = "{var.RANDOM}" + adl { + storage_resource_name = "{env.TEST_STORAGE_ACCOUNT_NAME}" + tenant_id = data.azurerm_client_config.current.tenant_id + client_id = data.azurerm_client_config.current.client_id + client_secret_scope = databricks_secret_scope.terraform.name + client_secret_key = databricks_secret.service_principal_key.key + spark_conf_prefix = "fs.adl" + } } ``` @@ -292,25 +292,25 @@ resource "azurerm_storage_container" "marketing" { } resource "databricks_secret_scope" "terraform" { - name = "application" - initial_manage_principal = "users" + name = "application" + initial_manage_principal = "users" } resource "databricks_secret" "storage_key" { - key = "blob_storage_key" - string_value = azurerm_storage_account.blobaccount.primary_access_key - scope = databricks_secret_scope.terraform.name + key = "blob_storage_key" + string_value = azurerm_storage_account.blobaccount.primary_access_key + scope = databricks_secret_scope.terraform.name } resource "databricks_mount" "marketing" { - name = "marketing" - wasb { - container_name = azurerm_storage_container.marketing.name - storage_account_name = azurerm_storage_account.blobaccount.name - auth_type = "ACCESS_KEY" - token_secret_scope = databricks_secret_scope.terraform.name - token_secret_key = databricks_secret.storage_key.key - } + name = "marketing" + wasb { + container_name = azurerm_storage_container.marketing.name + storage_account_name = azurerm_storage_account.blobaccount.name + auth_type = "ACCESS_KEY" + token_secret_scope = databricks_secret_scope.terraform.name + token_secret_key = databricks_secret.storage_key.key + } } ``` diff --git a/docs/resources/mws_customer_managed_keys.md b/docs/resources/mws_customer_managed_keys.md index 398f3ba078..9c5188acd9 100644 --- a/docs/resources/mws_customer_managed_keys.md +++ b/docs/resources/mws_customer_managed_keys.md @@ -64,12 +64,12 @@ resource "aws_kms_alias" "managed_services_customer_managed_key_alias" { } resource "databricks_mws_customer_managed_keys" "managed_services" { - account_id = var.databricks_account_id - aws_key_info { - key_arn = aws_kms_key.managed_services_customer_managed_key.arn - key_alias = aws_kms_alias.managed_services_customer_managed_key_alias.name - } - use_cases = ["MANAGED_SERVICES"] + account_id = var.databricks_account_id + aws_key_info { + key_arn = aws_kms_key.managed_services_customer_managed_key.arn + key_alias = aws_kms_alias.managed_services_customer_managed_key_alias.name + } + use_cases = ["MANAGED_SERVICES"] } # supply databricks_mws_customer_managed_keys.managed_services.customer_managed_key_id as managed_services_customer_managed_key_id for databricks_mws_workspaces ``` @@ -164,12 +164,12 @@ resource "aws_kms_alias" "storage_customer_managed_key_alias" { } resource "databricks_mws_customer_managed_keys" "storage" { - account_id = var.databricks_account_id - aws_key_info { - key_arn = aws_kms_key.storage_customer_managed_key.arn - key_alias = aws_kms_alias.storage_customer_managed_key_alias.name - } - use_cases = ["STORAGE"] + account_id = var.databricks_account_id + aws_key_info { + key_arn = 
aws_kms_key.storage_customer_managed_key.arn + key_alias = aws_kms_alias.storage_customer_managed_key_alias.name + } + use_cases = ["STORAGE"] } # supply databricks_mws_customer_managed_keys.storage.customer_managed_key_id as storage_customer_managed_key_id for databricks_mws_workspaces ``` diff --git a/docs/resources/mws_log_delivery.md b/docs/resources/mws_log_delivery.md index 28ee1d7676..b8d9902ebb 100644 --- a/docs/resources/mws_log_delivery.md +++ b/docs/resources/mws_log_delivery.md @@ -34,7 +34,7 @@ resource "aws_s3_bucket_public_access_block" "logdelivery" { } data "databricks_aws_assume_role_policy" "logdelivery" { - external_id = var.databricks_account_id + external_id = var.databricks_account_id for_log_delivery = true } @@ -56,35 +56,35 @@ resource "aws_s3_bucket_policy" "logdelivery" { } resource "databricks_mws_credentials" "log_writer" { - account_id = var.databricks_account_id - credentials_name = "Usage Delivery" - role_arn = aws_iam_role.logdelivery.arn + account_id = var.databricks_account_id + credentials_name = "Usage Delivery" + role_arn = aws_iam_role.logdelivery.arn } resource "databricks_mws_storage_configurations" "log_bucket" { - account_id = var.databricks_account_id - storage_configuration_name = "Usage Logs" - bucket_name = aws_s3_bucket.logdelivery.bucket + account_id = var.databricks_account_id + storage_configuration_name = "Usage Logs" + bucket_name = aws_s3_bucket.logdelivery.bucket } resource "databricks_mws_log_delivery" "usage_logs" { - account_id = var.databricks_account_id - credentials_id = databricks_mws_credentials.log_writer.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id - delivery_path_prefix = "billable-usage" - config_name = "Usage Logs" - log_type = "BILLABLE_USAGE" - output_format = "CSV" + account_id = var.databricks_account_id + credentials_id = databricks_mws_credentials.log_writer.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id + delivery_path_prefix = "billable-usage" + config_name = "Usage Logs" + log_type = "BILLABLE_USAGE" + output_format = "CSV" } resource "databricks_mws_log_delivery" "audit_logs" { - account_id = var.databricks_account_id - credentials_id = databricks_mws_credentials.log_writer.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id - delivery_path_prefix = "audit-logs" - config_name = "Audit Logs" - log_type = "AUDIT_LOGS" - output_format = "JSON" + account_id = var.databricks_account_id + credentials_id = databricks_mws_credentials.log_writer.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id + delivery_path_prefix = "audit-logs" + config_name = "Audit Logs" + log_type = "AUDIT_LOGS" + output_format = "JSON" } ``` @@ -96,13 +96,13 @@ Common processing scenario is to apply [cost allocation tags](https://docs.aws.a ```hcl resource "databricks_mws_log_delivery" "usage_logs" { - account_id = var.databricks_account_id - credentials_id = databricks_mws_credentials.log_writer.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id - delivery_path_prefix = "billable-usage" - config_name = "Usage Logs" - log_type = "BILLABLE_USAGE" - output_format = "CSV" + account_id = var.databricks_account_id + credentials_id = databricks_mws_credentials.log_writer.credentials_id + storage_configuration_id = 
databricks_mws_storage_configurations.log_bucket.storage_configuration_id + delivery_path_prefix = "billable-usage" + config_name = "Usage Logs" + log_type = "BILLABLE_USAGE" + output_format = "CSV" } ``` @@ -112,13 +112,13 @@ JSON files with [static schema](https://docs.databricks.com/administration-guide ```hcl resource "databricks_mws_log_delivery" "audit_logs" { - account_id = var.databricks_account_id - credentials_id = databricks_mws_credentials.log_writer.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id - delivery_path_prefix = "audit-logs" - config_name = "Audit Logs" - log_type = "AUDIT_LOGS" - output_format = "JSON" + account_id = var.databricks_account_id + credentials_id = databricks_mws_credentials.log_writer.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.log_bucket.storage_configuration_id + delivery_path_prefix = "audit-logs" + config_name = "Audit Logs" + log_type = "AUDIT_LOGS" + output_format = "JSON" } ``` diff --git a/docs/resources/mws_networks.md b/docs/resources/mws_networks.md index 0b5fa2601c..a12bbcc4c7 100644 --- a/docs/resources/mws_networks.md +++ b/docs/resources/mws_networks.md @@ -64,7 +64,7 @@ resource "databricks_mws_networks" "this" { In order to create a VPC [that leverages AWS PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html) you would need to add the `vpc_endpoint_id` Attributes from [mws_vpc_endpoint](mws_vpc_endpoint.md) resources into the [databricks_mws_networks](databricks_mws_networks.md) resource. For example: ```hcl - resource "databricks_mws_networks" "this" { +resource "databricks_mws_networks" "this" { provider = databricks.mws account_id = var.databricks_account_id network_name = "${local.prefix}-network" @@ -72,10 +72,10 @@ In order to create a VPC [that leverages AWS PrivateLink](https://docs.databrick subnet_ids = module.vpc.private_subnets vpc_id = module.vpc.vpc_id vpc_endpoints { - dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id] - rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id] + dataplane_relay = [databricks_mws_vpc_endpoint.relay.vpc_endpoint_id] + rest_api = [databricks_mws_vpc_endpoint.workspace.vpc_endpoint_id] } - depends_on = [aws_vpc_endpoint.workspace, aws_vpc_endpoint.relay] + depends_on = [aws_vpc_endpoint.workspace, aws_vpc_endpoint.relay] } ``` diff --git a/docs/resources/mws_workspaces.md b/docs/resources/mws_workspaces.md index f93bedfb54..94169fcfab 100644 --- a/docs/resources/mws_workspaces.md +++ b/docs/resources/mws_workspaces.md @@ -11,7 +11,7 @@ This resource allows you to set up [workspaces in E2 architecture on AWS](https: ```hcl provider "databricks" { - host = module.ai.databricks_host + host = module.ai.databricks_host token = module.ai.databricks_token } ``` @@ -55,11 +55,11 @@ resource "databricks_mws_storage_configurations" "this" { // register VPC resource "databricks_mws_networks" "this" { - provider = databricks.mws - account_id = var.databricks_account_id - network_name = "${var.prefix}-network" - vpc_id = var.vpc_id - subnet_ids = var.subnets_private + provider = databricks.mws + account_id = var.databricks_account_id + network_name = "${var.prefix}-network" + vpc_id = var.vpc_id + subnet_ids = var.subnets_private security_group_ids = [var.security_group] } @@ -71,9 +71,9 @@ resource "databricks_mws_workspaces" "this" { deployment_name = var.prefix aws_region = var.region - credentials_id = 
databricks_mws_credentials.this.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.this.storage_configuration_id - network_id = databricks_mws_networks.this.network_id + credentials_id = databricks_mws_credentials.this.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.this.storage_configuration_id + network_id = databricks_mws_networks.this.network_id token {} } @@ -169,9 +169,9 @@ resource "databricks_mws_workspaces" "this" { deployment_name = local.prefix aws_region = "us-east-1" - credentials_id = databricks_mws_credentials.this.credentials_id - storage_configuration_id = databricks_mws_storage_configurations.this.storage_configuration_id - + credentials_id = databricks_mws_credentials.this.credentials_id + storage_configuration_id = databricks_mws_storage_configurations.this.storage_configuration_id + token {} } diff --git a/docs/resources/notebook.md b/docs/resources/notebook.md index bc354b3bcf..333357a718 100644 --- a/docs/resources/notebook.md +++ b/docs/resources/notebook.md @@ -15,7 +15,7 @@ data "databricks_current_user" "me" { resource "databricks_notebook" "ddl" { source = "${path.module}/DDLgen.py" - path = "${data.databricks_current_user.me.home}/AA/BB/CC" + path = "${data.databricks_current_user.me.home}/AA/BB/CC" } ``` @@ -28,7 +28,7 @@ resource "databricks_notebook" "notebook" { display(spark.range(10)) EOT ) - path = "/Shared/Demo" + path = "/Shared/Demo" language = "PYTHON" } ``` @@ -38,7 +38,7 @@ You can also manage [Databricks Archives](https://docs.databricks.com/notebooks/ ```hcl resource "databricks_notebook" "lesson" { source = "${path.module}/IntroNotebooks.dbc" - path = "/Shared/Intro" + path = "/Shared/Intro" } ``` diff --git a/docs/resources/obo_token.md b/docs/resources/obo_token.md index f9ff0face6..d14794573c 100644 --- a/docs/resources/obo_token.md +++ b/docs/resources/obo_token.md @@ -18,19 +18,19 @@ resource "databricks_permissions" "token_usage" { authorization = "tokens" access_control { service_principal_name = databricks_service_principal.this.application_id - permission_level = "CAN_USE" + permission_level = "CAN_USE" } } resource "databricks_obo_token" "this" { - depends_on = [databricks_permissions.token_usage] - application_id = databricks_service_principal.this.application_id - comment = "PAT on behalf of ${databricks_service_principal.this.display_name}" + depends_on = [databricks_permissions.token_usage] + application_id = databricks_service_principal.this.application_id + comment = "PAT on behalf of ${databricks_service_principal.this.display_name}" lifetime_seconds = 3600 } output "obo" { - value = databricks_obo_token.this.token_value + value = databricks_obo_token.this.token_value sensitive = true } ``` @@ -47,14 +47,14 @@ data "databricks_group" "admins" { } resource "databricks_group_member" "this" { - group_id = data.databricks_group.admins.id + group_id = data.databricks_group.admins.id member_id = databricks_service_principal.this.id } resource "databricks_obo_token" "this" { - depends_on = [databricks_group_member.this] - application_id = databricks_service_principal.this.application_id - comment = "PAT on behalf of ${databricks_service_principal.this.display_name}" + depends_on = [databricks_group_member.this] + application_id = databricks_service_principal.this.application_id + comment = "PAT on behalf of ${databricks_service_principal.this.display_name}" lifetime_seconds = 3600 } ``` @@ -72,4 +72,4 @@ The following arguments are required: In addition to all arguments 
above, the following attributes are exported: * `id` - Canonical unique identifier for the token. -* `token_value` - **Sensitive** value of the newly-created token. \ No newline at end of file +* `token_value` - **Sensitive** value of the newly-created token. diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md index f3bae77db3..9717e92836 100644 --- a/docs/resources/permissions.md +++ b/docs/resources/permissions.md @@ -14,45 +14,45 @@ It's possible to separate [cluster access control](https://docs.databricks.com/s ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_group" "ds" { - display_name = "Data Science" + display_name = "Data Science" } resource "databricks_cluster" "shared_autoscaling" { - cluster_name = "Shared Autoscaling" - spark_version = "6.6.x-scala2.11" - node_type_id = "Standard_DS3_v2" - autotermination_minutes = 60 - autoscale { - min_workers = 1 - max_workers = 10 - } + cluster_name = "Shared Autoscaling" + spark_version = "6.6.x-scala2.11" + node_type_id = "Standard_DS3_v2" + autotermination_minutes = 60 + autoscale { + min_workers = 1 + max_workers = 10 + } } resource "databricks_permissions" "cluster_usage" { - cluster_id = databricks_cluster.shared_autoscaling.cluster_id + cluster_id = databricks_cluster.shared_autoscaling.cluster_id - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_ATTACH_TO" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_ATTACH_TO" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_RESTART" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_RESTART" + } - access_control { - group_name = databricks_group.ds.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.ds.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -62,37 +62,37 @@ Cluster policies allow creation of [clusters](cluster.md), that match [given pol ```hcl resource "databricks_group" "ds" { - display_name = "Data Science" + display_name = "Data Science" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_cluster_policy" "something_simple" { - name = "Some simple policy" - definition = jsonencode({ - "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": { - "type": "forbidden" - }, - "spark_conf.spark.secondkey": { - "type": "forbidden" - } - }) + name = "Some simple policy" + definition = jsonencode({ + "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL" : { + "type" : "forbidden" + }, + "spark_conf.spark.secondkey" : { + "type" : "forbidden" + } + }) } resource "databricks_permissions" "policy_usage" { - cluster_policy_id = databricks_cluster_policy.something_simple.id + cluster_policy_id = databricks_cluster_policy.something_simple.id - access_control { - group_name = databricks_group.ds.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.ds.display_name + permission_level = "CAN_USE" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_USE" + } } ``` @@ -102,33 +102,33 @@ 
resource "databricks_permissions" "policy_usage" { ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_instance_pool" "this" { - instance_pool_name = "Reserved Instances" - idle_instance_autotermination_minutes = 60 - node_type_id = "i3.xlarge" - min_idle_instances = 0 - max_capacity = 10 + instance_pool_name = "Reserved Instances" + idle_instance_autotermination_minutes = 60 + node_type_id = "i3.xlarge" + min_idle_instances = 0 + max_capacity = 10 } resource "databricks_permissions" "pool_usage" { - instance_pool_id = databricks_instance_pool.this.id + instance_pool_id = databricks_instance_pool.this.id - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_ATTACH_TO" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_ATTACH_TO" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -144,11 +144,11 @@ There are four assignable [permission levels](https://docs.databricks.com/securi ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_service_principal" "aws_principal" { @@ -156,42 +156,42 @@ resource "databricks_service_principal" "aws_principal" { } resource "databricks_job" "this" { - name = "Featurization" - max_concurrent_runs = 1 + name = "Featurization" + max_concurrent_runs = 1 - new_cluster { - num_workers = 300 - spark_version = "6.6.x-scala2.11" - node_type_id = "Standard_DS3_v2" - } + new_cluster { + num_workers = 300 + spark_version = "6.6.x-scala2.11" + node_type_id = "Standard_DS3_v2" + } - notebook_task { - notebook_path = "/Production/MakeFeatures" - } + notebook_task { + notebook_path = "/Production/MakeFeatures" + } } resource "databricks_permissions" "job_usage" { - job_id = databricks_job.this.id + job_id = databricks_job.this.id - access_control { - group_name = "users" - permission_level = "CAN_VIEW" - } + access_control { + group_name = "users" + permission_level = "CAN_VIEW" + } - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_MANAGE_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_MANAGE_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } - - access_control { - service_principal_name = databricks_service_principal.aws_principal.application_id - permission_level = "IS_OWNER" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } + + access_control { + service_principal_name = databricks_service_principal.aws_principal.application_id + permission_level = "IS_OWNER" + } } ``` @@ -201,36 +201,36 @@ Valid [permission levels](https://docs.databricks.com/security/access-control/wo ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_notebook" "this" { - content_base64 = base64encode("# 
Welcome to your Python notebook") - path = "/Production/ETL/Features" - language = "PYTHON" + content_base64 = base64encode("# Welcome to your Python notebook") + path = "/Production/ETL/Features" + language = "PYTHON" } resource "databricks_permissions" "notebook_usage" { - notebook_path = databricks_notebook.this.path + notebook_path = databricks_notebook.this.path - access_control { - group_name = "users" - permission_level = "CAN_READ" - } + access_control { + group_name = "users" + permission_level = "CAN_READ" + } - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_EDIT" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_EDIT" + } } ``` @@ -245,35 +245,35 @@ Valid [permission levels](https://docs.databricks.com/security/access-control/wo ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_directory" "this" { - path = "/Production/ETL" + path = "/Production/ETL" } resource "databricks_permissions" "folder_usage" { - directory_path = databricks_directory.this.path - depends_on = [databricks_directory.this] + directory_path = databricks_directory.this.path + depends_on = [databricks_directory.this] - access_control { - group_name = "users" - permission_level = "CAN_READ" - } + access_control { + group_name = "users" + permission_level = "CAN_READ" + } - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_EDIT" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_EDIT" + } } ``` @@ -283,11 +283,11 @@ Valid [permission levels](https://docs.databricks.com/security/access-control/wo ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_repo" "this" { @@ -295,22 +295,22 @@ resource "databricks_repo" "this" { } resource "databricks_permissions" "repo_usage" { - repo_id = databricks_repo.this.id + repo_id = databricks_repo.this.id - access_control { - group_name = "users" - permission_level = "CAN_READ" - } + access_control { + group_name = "users" + permission_level = "CAN_READ" + } - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_EDIT" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_EDIT" + } } ``` @@ -320,16 +320,16 @@ By default on AWS deployments, all admin users can sign in to Databricks using e ```hcl resource "databricks_group" "guests" { - display_name = "Guest Users" + display_name = "Guest Users" } resource "databricks_permissions" "password_usage" { - authorization 
= "passwords" + authorization = "passwords" - access_control { - group_name = databricks_group.guests.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.guests.display_name + permission_level = "CAN_USE" + } } ``` @@ -339,25 +339,25 @@ Only [possible permission](https://docs.databricks.com/administration-guide/acce ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_permissions" "token_usage" { - authorization = "tokens" + authorization = "tokens" - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_USE" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_USE" + } } ``` @@ -369,38 +369,38 @@ resource "databricks_permissions" "token_usage" { data "databricks_current_user" "me" {} resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_sql_endpoint" "this" { - name = "Endpoint of ${data.databricks_current_user.me.alphanumeric}" - cluster_size = "Small" + name = "Endpoint of ${data.databricks_current_user.me.alphanumeric}" + cluster_size = "Small" max_num_clusters = 1 tags { custom_tags { - key = "City" - value = "Amsterdam" + key = "City" + value = "Amsterdam" } } } resource "databricks_permissions" "endpoint_usage" { - sql_endpoint_id = databricks_sql_endpoint.this.id + sql_endpoint_id = databricks_sql_endpoint.this.id - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_USE" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_USE" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -410,25 +410,25 @@ resource "databricks_permissions" "endpoint_usage" { ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_permissions" "endpoint_usage" { - sql_dashboard_id = "3244325" + sql_dashboard_id = "3244325" - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -438,25 +438,25 @@ resource "databricks_permissions" "endpoint_usage" { ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_permissions" "endpoint_usage" { - sql_query_id = "3244325" + sql_query_id = 
"3244325" - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -466,25 +466,25 @@ resource "databricks_permissions" "endpoint_usage" { ```hcl resource "databricks_group" "auto" { - display_name = "Automation" + display_name = "Automation" } resource "databricks_group" "eng" { - display_name = "Engineering" + display_name = "Engineering" } resource "databricks_permissions" "endpoint_usage" { - sql_alert_id = "3244325" + sql_alert_id = "3244325" - access_control { - group_name = databricks_group.auto.display_name - permission_level = "CAN_RUN" - } + access_control { + group_name = databricks_group.auto.display_name + permission_level = "CAN_RUN" + } - access_control { - group_name = databricks_group.eng.display_name - permission_level = "CAN_MANAGE" - } + access_control { + group_name = databricks_group.eng.display_name + permission_level = "CAN_MANAGE" + } } ``` @@ -520,8 +520,8 @@ One or more `access_control` blocks are required to actually set the permission ```hcl access_control { - group_name = databricks_group.datascience.display_name - permission_level = "CAN_USE" + group_name = databricks_group.datascience.display_name + permission_level = "CAN_USE" } ``` diff --git a/docs/resources/secret.md b/docs/resources/secret.md index 8817f4d5ab..042f03d4c9 100644 --- a/docs/resources/secret.md +++ b/docs/resources/secret.md @@ -9,12 +9,12 @@ With this resource you can insert a secret under the provided scope with the giv ```hcl resource "databricks_secret_scope" "app" { - name = "application-secret-scope" + name = "application-secret-scope" } resource "databricks_secret" "publishing_api" { - key = "publishing_api" - string_value = data.azurerm_key_vault_secret.example.value - scope = databricks_secret_scope.app.id + key = "publishing_api" + string_value = data.azurerm_key_vault_secret.example.value + scope = databricks_secret_scope.app.id } ``` @@ -41,4 +41,4 @@ The resource secret can be imported using `scopeName|||secretKey` combination. 
* ```bash $ terraform import databricks_secret.app `scopeName|||secretKey` -``` \ No newline at end of file +``` diff --git a/docs/resources/secret_acl.md b/docs/resources/secret_acl.md index 47bfa09ddd..3b3bd0cf7e 100644 --- a/docs/resources/secret_acl.md +++ b/docs/resources/secret_acl.md @@ -15,20 +15,20 @@ resource "databricks_group" "ds" { } resource "databricks_secret_scope" "app" { - name = "app-secret-scope" + name = "app-secret-scope" } resource "databricks_secret_acl" "my_secret_acl" { - principal = databricks_group.ds.display_name - permission = "READ" - scope = databricks_secret_scope.app.name + principal = databricks_group.ds.display_name + permission = "READ" + scope = databricks_secret_scope.app.name } resource "databricks_secret" "publishing_api" { - key = "publishing_api" - // replace it with secret management solution of your choice :-) - string_value = data.azurerm_key_vault_secret.example.value - scope = databricks_secret_scope.app.name + key = "publishing_api" + // replace it with secret management solution of your choice :-) + string_value = data.azurerm_key_vault_secret.example.value + scope = databricks_secret_scope.app.name } ``` @@ -46,4 +46,4 @@ The resource secret acl can be imported using `scopeName|||principalName` combin ```bash $ terraform import databricks_secret_acl.object `scopeName|||principalName` -``` \ No newline at end of file +``` diff --git a/docs/resources/secret_scope.md b/docs/resources/secret_scope.md index c0e24bf0f3..a75472eecb 100644 --- a/docs/resources/secret_scope.md +++ b/docs/resources/secret_scope.md @@ -53,7 +53,7 @@ resource "databricks_secret_scope" "kv" { keyvault_metadata { resource_id = azurerm_key_vault.this.id - dns_name = azurerm_key_vault.this.vault_uri + dns_name = azurerm_key_vault.this.vault_uri } } ``` diff --git a/docs/resources/service_principal.md b/docs/resources/service_principal.md index 4e362aa830..b3b8f678c3 100644 --- a/docs/resources/service_principal.md +++ b/docs/resources/service_principal.md @@ -23,11 +23,11 @@ data "databricks_group" "admins" { } resource "databricks_service_principal" "sp" { - application_id = "00000000-0000-0000-0000-000000000000" + application_id = "00000000-0000-0000-0000-000000000000" } resource "databricks_group_member" "i-am-admin" { - group_id = data.databricks_group.admins.id + group_id = data.databricks_group.admins.id member_id = databricks_service_principal.sp.id } ``` @@ -36,8 +36,8 @@ Creating service principal with cluster create permissions: ```hcl resource "databricks_service_principal" "sp" { - application_id = "00000000-0000-0000-0000-000000000000" - display_name = "Example service principal" + application_id = "00000000-0000-0000-0000-000000000000" + display_name = "Example service principal" allow_cluster_create = true } ``` diff --git a/docs/resources/sql_endpoint.md b/docs/resources/sql_endpoint.md index 3f05ce6551..2c77b7840e 100644 --- a/docs/resources/sql_endpoint.md +++ b/docs/resources/sql_endpoint.md @@ -11,14 +11,14 @@ To create [SQL endpoints](https://docs.databricks.com/sql/get-started/concepts.h data "databricks_current_user" "me" {} resource "databricks_sql_endpoint" "this" { - name = "Endpoint of ${data.databricks_current_user.me.alphanumeric}" - cluster_size = "Small" + name = "Endpoint of ${data.databricks_current_user.me.alphanumeric}" + cluster_size = "Small" max_num_clusters = 1 tags { custom_tags { - key = "City" - value = "Amsterdam" + key = "City" + value = "Amsterdam" } } } diff --git a/docs/resources/sql_global_config.md 
b/docs/resources/sql_global_config.md index b1648a3859..f68676066d 100644 --- a/docs/resources/sql_global_config.md +++ b/docs/resources/sql_global_config.md @@ -9,10 +9,10 @@ This resource configures the security policy, [databricks_instance_profile](inst ```hcl resource "databricks_sql_global_config" "this" { - security_policy = "DATA_ACCESS_CONTROL" + security_policy = "DATA_ACCESS_CONTROL" instance_profile_arn = "arn:...." data_access_config = { - "spark.sql.session.timeZone": "UTC" + "spark.sql.session.timeZone" : "UTC" } } ``` diff --git a/docs/resources/sql_permissions.md b/docs/resources/sql_permissions.md index 3bcf1ffaee..0cd1433655 100644 --- a/docs/resources/sql_permissions.md +++ b/docs/resources/sql_permissions.md @@ -10,11 +10,11 @@ resource "databricks_cluster" "cluster_with_table_access_control" { // ... spark_conf = { - "spark.databricks.acl.dfAclsEnabled": "true", - "spark.databricks.repl.allowedLanguages": "python,sql", + "spark.databricks.acl.dfAclsEnabled" : "true", + "spark.databricks.repl.allowedLanguages" : "python,sql", } -} +} ``` It could be combined with creation of High-Concurrency and Single-Node clusters - in this case it should have corresponding `custom_tags` and `spark.databricks.cluster.profile` in Spark configuration as described in [documentation for `databricks_cluster` resource](cluster.md). @@ -40,17 +40,17 @@ The following resource definition will enforce access control on a table by exec ```hcl resource "databricks_sql_permissions" "foo_table" { - table = "foo" + table = "foo" - privilege_assignments { - principal = "serge@example.com" - privileges = ["SELECT", "MODIFY"] - } + privilege_assignments { + principal = "serge@example.com" + privileges = ["SELECT", "MODIFY"] + } - privilege_assignments { - principal = "special group" - privileges = ["SELECT"] - } + privilege_assignments { + principal = "special group" + privileges = ["SELECT"] + } } ``` diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md index d723d65dee..9bf76cd69b 100644 --- a/docs/resources/sql_query.md +++ b/docs/resources/sql_query.md @@ -14,9 +14,9 @@ A query may have one or more [visualizations](sql_visualization.md). ```hcl resource "databricks_sql_query" "q1" { data_source_id = databricks_sql_endpoint.example.data_source_id - name = "My Query Name" - query = "SELECT {{ p1 }} AS p1, 2 as p2" - run_as_role = "viewer" + name = "My Query Name" + query = "SELECT {{ p1 }} AS p1, 2 as p2" + run_as_role = "viewer" schedule { continuous { @@ -25,7 +25,7 @@ resource "databricks_sql_query" "q1" { } parameter { - name = "p1" + name = "p1" title = "Title for p1" text { value = "default" diff --git a/docs/resources/sql_visualization.md b/docs/resources/sql_visualization.md index 0b9257bf84..896be24e28 100644 --- a/docs/resources/sql_visualization.md +++ b/docs/resources/sql_visualization.md @@ -13,9 +13,9 @@ A visualization is always tied to a [query](sql_query.md). Every query may have ```hcl resource "databricks_sql_visualization" "q1v1" { - query_id = databricks_sql_query.q1.id - type = "table" - name = "My Table" + query_id = databricks_sql_query.q1.id + type = "table" + name = "My Table" description = "Some Description" // The options encoded in this field are passed verbatim to the SQLA API. 
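The `sql_visualization` hunk above ends at the comment describing the `options` field. For reference, here is a minimal sketch of populating that field with `jsonencode`; the option keys below (`itemsPerPage`, `condensed`) are illustrative assumptions, not documented values:

```hcl
resource "databricks_sql_visualization" "q1v1" {
  query_id    = databricks_sql_query.q1.id
  type        = "table"
  name        = "My Table"
  description = "Some Description"

  // The options document is passed verbatim to the SQLA API,
  // so jsonencode keeps it well-formed JSON end to end; the
  // keys used here are illustrative placeholders.
  options = jsonencode({
    "itemsPerPage" : 25,
    "condensed" : true
  })
}
```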
diff --git a/docs/resources/token.md b/docs/resources/token.md index fa49f027d7..c3dc34e912 100644 --- a/docs/resources/token.md +++ b/docs/resources/token.md @@ -10,9 +10,9 @@ This resource creates an api token that can be used to create Databricks resourc ```hcl // initialize provider in normal mode provider "databricks" { - alias = "created_workspace" - - host = databricks_mws_workspaces.this.workspace_url + alias = "created_workspace" + + host = databricks_mws_workspaces.this.workspace_url } // create PAT token to provision entities within workspace @@ -42,4 +42,4 @@ The following arguments are available: In addition to all arguments above, the following attributes are exported: * `id` - Canonical unique identifier for the token. -* `token_value` - **Sensitive** value of the newly-created token. \ No newline at end of file +* `token_value` - **Sensitive** value of the newly-created token. diff --git a/docs/resources/user.md b/docs/resources/user.md index 304fb8ef17..246dd14973 100644 --- a/docs/resources/user.md +++ b/docs/resources/user.md @@ -19,15 +19,15 @@ Creating user with administrative permissions - referencing special `admins` [da ```hcl data "databricks_group" "admins" { - display_name = "admins" + display_name = "admins" } resource "databricks_user" "me" { - user_name = "me@example.com" + user_name = "me@example.com" } resource "databricks_group_member" "i-am-admin" { - group_id = data.databricks_group.admins.id + group_id = data.databricks_group.admins.id member_id = databricks_user.me.id } ``` @@ -36,8 +36,8 @@ Creating user with cluster create permissions: ```hcl resource "databricks_user" "me" { - user_name = "me@example.com" - display_name = "Example user" + user_name = "me@example.com" + display_name = "Example user" allow_cluster_create = true } ``` diff --git a/docs/resources/user_instance_profile.md b/docs/resources/user_instance_profile.md index 0e632bd06f..dcca899ddf 100644 --- a/docs/resources/user_instance_profile.md +++ b/docs/resources/user_instance_profile.md @@ -11,16 +11,16 @@ This resource allows you to attach instance profiles to users. ```hcl resource "databricks_instance_profile" "instance_profile" { - instance_profile_arn = "my_instance_profile_arn" + instance_profile_arn = "my_instance_profile_arn" } resource "databricks_user" "my_user" { - user_name = "me@example.com" + user_name = "me@example.com" } resource "databricks_user_instance_profile" "my_user_instance_profile" { - user_id = databricks_user.my_user.id - instance_profile_id = databricks_instance_profile.instance_profile.id + user_id = databricks_user.my_user.id + instance_profile_id = databricks_instance_profile.instance_profile.id } ``` ## Argument Reference diff --git a/docs/resources/workspace_conf.md b/docs/resources/workspace_conf.md index 6191be3db8..15519dc718 100644 --- a/docs/resources/workspace_conf.md +++ b/docs/resources/workspace_conf.md @@ -17,9 +17,9 @@ Allows specification of custom configuration properties for expert usage: ```hcl resource "databricks_workspace_conf" "this" { - custom_config = { - "enableIpAccessLists": true - } + custom_config = { + "enableIpAccessLists" : true + } } ``` diff --git a/scripts/README.md b/scripts/README.md index 49ddfa1dfb..ded4f95360 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -104,4 +104,4 @@ func TestAccListClustersIntegration(t *testing.T) { // ... 
 }
-```
\ No newline at end of file
+```
diff --git a/scripts/azvnet-integration/README.md b/scripts/azvnet-integration/README.md
index a3143a6ed7..4620e4e553 100644
--- a/scripts/azvnet-integration/README.md
+++ b/scripts/azvnet-integration/README.md
@@ -30,4 +30,4 @@
 
 ## notebook.py
 
-* Very basic notebook export, just listing file on the mount and displaying job parameter
\ No newline at end of file
+* Very basic notebook export, just listing files on the mount and displaying the job parameter
diff --git a/scripts/gcp-integration/README.md b/scripts/gcp-integration/README.md
index ce0a8f7e5a..d76239a607 100644
--- a/scripts/gcp-integration/README.md
+++ b/scripts/gcp-integration/README.md
@@ -3,4 +3,4 @@ make test-gcp
 
 Used for running integration tests on GCP.
 
-* `DATABRICKS_GOOGLE_SERVICE_ACCOUNT` is the account created through [this module](../gcp-accounts-integration/service-account/main.tf).
\ No newline at end of file
+* `DATABRICKS_GOOGLE_SERVICE_ACCOUNT` is the account created through [this module](../gcp-accounts-integration/service-account/main.tf).
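For readers skimming the hunks: every change in this patch applies the same mechanical rule, the standard Terraform formatter's layout convention, under which consecutive single-line arguments in a block get their `=` padded to a common column and indentation is normalized to two spaces. A before/after sketch on a made-up `databricks_secret_scope` body (the `initial_manage_principal` argument appears here only to create an alignment gap):

```hcl
// Before formatting: each argument keeps its own spacing.
resource "databricks_secret_scope" "before" {
  name = "application-secret-scope"
  initial_manage_principal = "users"
}

// After terraform-fmt-style alignment: the `=` signs of
// adjacent single-line arguments share a column.
resource "databricks_secret_scope" "after" {
  name                     = "application-secret-scope"
  initial_manage_principal = "users"
}
```

Because only whitespace moves, none of the reformatted examples changes what `terraform plan` would produce.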