diff --git a/.gitignore b/.gitignore index 1202d5f..8c87ce0 100644 --- a/.gitignore +++ b/.gitignore @@ -4,12 +4,14 @@ stackql-azure-cloud-shell.sh stackql-google-cloud-shell.sh stackql /.stackql -.env +**/.env .pypirc stack/ oss-activity-monitor/ testcreds/ *.log +venv/ +.venv/ # Byte-compiled / optimized / DLL files __pycache__/ @@ -80,3 +82,5 @@ instance/ # Sphinx documentation docs/_build/ + +.DS_Store diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..2a3bc49 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "files.associations": { + "*.iql": "sql" + } +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 53138a5..be70b7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 1.8.3 (2025-02-08) + +- Added walkthrough for databricks bootstrap on aws. +- Bugfix for export variables on dry run. + +## 1.8.2 (2025-01-16) + +- Added timing output for `build`, `test` and `teardown` operations + +## 1.8.1 (2025-01-11) + +- Added `uuid()` templating function +- Exports evaluation optimization for teardown operations + ## 1.8.0 (2024-11-09) - Added option for command specific authentication diff --git a/README.md b/README.md index 6acd28d..c295d43 100644 --- a/README.md +++ b/README.md @@ -241,8 +241,13 @@ stackql-deploy --help To get started with **stackql-deploy**, install it locally using pip: -``` +```bash +python3 -m venv venv +source venv/bin/activate pip install -e . +# ... 
+deactivate +rm -rf venv/ ``` ### To Remove the Locally Installed Package diff --git a/cicd/setup/setup-env.sh b/cicd/setup/setup-env.sh new file mode 100755 index 0000000..d66967a --- /dev/null +++ b/cicd/setup/setup-env.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +export REPOSITORY_ROOT="$(realpath ${CURRENT_DIR}/../..)" + +python -m venv ${REPOSITORY_ROOT}/.venv + +source ${REPOSITORY_ROOT}/.venv/bin/activate + +pip install -r ${REPOSITORY_ROOT}/requirements.txt + +cd ${REPOSITORY_ROOT} && python setup.py install + +chmod +x examples/databricks/all-purpose-cluster/sec/*.sh + +pip freeze + diff --git a/examples/databricks/all-purpose-cluster/README.md b/examples/databricks/all-purpose-cluster/README.md new file mode 100644 index 0000000..a1b2088 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/README.md @@ -0,0 +1,252 @@ +# `stackql-deploy` example project for `databricks` + +This exercise is to bootstrap a databricks / aws tenancy using `stackql-deploy`. It is an important use case for platform bootstrap and we are excited to perform it with the `stackql` toolchain. We hope you enjoy and find this valuable. Please drop us a note with your forthright opinion on this and check out our issues on github. + +## A word of caution + +Please take the greatest care in performing this exercise; it will incur expenses, as it involves creating (and destroying) resources which cost money. Please be aware that you **must** cancel your databricks subscription after completing this exercise, otherwise you will incur ongoing expenses. That is, do **not** skip the section [Cancel databricks subscription](#cancel-databricks-subsription). We strongly advise that you verify all resources are destroyed at the conclusion of this exercise. Web pages and certain behaviours may change, so please be thorough in your verification. We will keep this page up-to-date on a best effort basis only. 
It is very much a case where owner onus applies. + +## Manual Setup + +Dependencies: + +- aws Account Created. +- Required clickops to set up databricks on aws: + - Turn on aws Marketplace `databricks` offering, using [the aws manage subscriptions page](https://console.aws.amazon.com/marketplace/home#/subscriptions), per Figure S1. + - Follow the suggested setup flow as directed, from this page. These clickops steps are necessary at this time for initial account setup. The way I followed this, it created a workspace for me at setup, per Figure S3. We shall not use this one and rather, later on we shall dispose of it; because we do not trust auto-created resources out of hand. In the process of creating the databricks subscription, a second aws account is created. + - Copy the databricks account id from basically any web page in the databricks console. This is done by clicking on the user icon at the top RHS and then the UI provides a copy shortcut, per Figure U1. Save this locally for later use, expanded below. + - We need the aws account id that was created for the databricks subscription. It is not exactly heralded by the web pages, nor is it actively hidden. It can be captured in a couple of places, including the databricks storage account created in the subscription flow, per Figure XA1. Copy and save this locally for later use, expanded below. + - Create a service principal to use as a "CICD agent", using the page shown in Figure S4. + - Grant the CICD agent account admin role, using the page shown in Figure S5. + - Create a secret for the CICD agent, using the page shown in Figure S6. At the time you create this, you will need to safely store the client secret and client id, as prompted by the web page. These will be used below. + +Now, it is convenient to use environment variables for context. 
Note that for our example, there is only one aws account apropos, however this is not always the case for an active professional, so while `DATABRICKS_AWS_ACCOUNT_ID` is the same as `AWS_ACCOUNT_ID` here, it need not always be the case. Create a file in the path `examples/databricks/all-purpose-cluster/sec/env.sh` (relative to the root of this repository) with contents of the form: + +```bash +#!/usr/bin/env bash + +export ASSETS_AWS_REGION='us-east-1' # or wherever you want +export AWS_ACCOUNT_ID='' +export DATABRICKS_ACCOUNT_ID='' +export DATABRICKS_AWS_ACCOUNT_ID='' + +# These need to be created by clickops under [the account level user management page](https://accounts.cloud.databricks.com/user-management). +export DATABRICKS_CLIENT_ID='' +export DATABRICKS_CLIENT_SECRET='' + +## These can be skipped if you run on [aws cloud shell](https://docs.aws.amazon.com/cloudshell/latest/userguide/welcome.html). +export AWS_SECRET_ACCESS_KEY='' +export AWS_ACCESS_KEY_ID='' + +``` + +## Optional step: sanity checks with stackql + +Now, let us do some sanity checks and housekeeping with `stackql`. This is purely optional. From the root of this repository: + +``` + +source examples/databricks/all-purpose-cluster/convenience.sh + +stackql shell + +``` + +This will start a `stackql` interactive shell. Here are some commands you can run (I will not place output here, that will be shared in a corresponding video): + + +```sql + +registry pull databricks_account v24.12.00279; + +registry pull databricks_workspace v24.12.00279; + +-- This will fail if accounts, subscription, or credentials are in error. 
+select account_id FROM databricks_account.provisioning.credentials WHERE account_id = ''; + + +select account_id, workspace_name, workspace_id, workspace_status from databricks_account.provisioning.workspaces where account_id = ''; + +``` + +For extra credit, you can (asynchronously) delete the unnecessary workspace with `delete from databricks_account.provisioning.workspaces where account_id = '' and workspace_id = '';`, where you obtain the workspace id from the above query. I have noted that due to some response caching it takes a while to disappear from select queries (much longer than disappearance from the web page), and you may want to bounce the `stackql` session to hurry things along. This is not happening on the `stackql` side, but session bouncing forces a token refresh which can help cache busting. + +## Lifecycle management + +Time to get down to business. From the root of this repository: + +```bash + +source examples/databricks/all-purpose-cluster/convenience.sh + +source ./.venv/bin/activate + + +``` + +Then, do a dry run (good for catching **some** environmental issues): + +```bash +stackql-deploy build \ +examples/databricks/all-purpose-cluster dev \ +-e AWS_REGION=${ASSETS_AWS_REGION} \ +-e AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID} \ +-e DATABRICKS_ACCOUNT_ID=${DATABRICKS_ACCOUNT_ID} \ +-e DATABRICKS_AWS_ACCOUNT_ID=${DATABRICKS_AWS_ACCOUNT_ID} \ +--dry-run +``` + +You will see a verbose rendition of what `stackql-deploy` intends to do. 
+ + +Now, let us do it for real: + +```bash +stackql-deploy build \ +examples/databricks/all-purpose-cluster dev \ +-e AWS_REGION=${ASSETS_AWS_REGION} \ +-e AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID} \ +-e DATABRICKS_ACCOUNT_ID=${DATABRICKS_ACCOUNT_ID} \ +-e DATABRICKS_AWS_ACCOUNT_ID=${DATABRICKS_AWS_ACCOUNT_ID} \ +--show-queries +``` + +The output is quite verbose, concludes in: + +``` +2025-02-08 12:51:25,914 - stackql-deploy - INFO - 📤 set [databricks_workspace_id] to [482604062392118] in exports +2025-02-08 12:51:25,915 - stackql-deploy - INFO - ✅ successfully deployed databricks_workspace +2025-02-08 12:51:25,915 - stackql-deploy - INFO - deployment completed in 0:04:09.603631 +🚀 build complete +``` + +Success!!! + +We can also use `stackql-deploy` to assess if our infra is shipshape: + +```bash +stackql-deploy test \ +examples/databricks/all-purpose-cluster dev \ +-e AWS_REGION=${ASSETS_AWS_REGION} \ +-e AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID} \ +-e DATABRICKS_ACCOUNT_ID=${DATABRICKS_ACCOUNT_ID} \ +-e DATABRICKS_AWS_ACCOUNT_ID=${DATABRICKS_AWS_ACCOUNT_ID} \ +--show-queries +``` + +Again, the output is quite verbose, concludes in: + +``` +2025-02-08 13:15:45,821 - stackql-deploy - INFO - 📤 set [databricks_workspace_id] to [482604062392118] in exports +2025-02-08 13:15:45,821 - stackql-deploy - INFO - ✅ test passed for databricks_workspace +2025-02-08 13:15:45,821 - stackql-deploy - INFO - deployment completed in 0:02:30.255860 +🔍 tests complete (dry run: False) +``` + +Success!!! 
+ +Now, let us teardown our `stackql-deploy` managed infra: + +```bash +stackql-deploy teardown \ +examples/databricks/all-purpose-cluster dev \ +-e AWS_REGION=${ASSETS_AWS_REGION} \ +-e AWS_ACCOUNT_ID=${AWS_ACCOUNT_ID} \ +-e DATABRICKS_ACCOUNT_ID=${DATABRICKS_ACCOUNT_ID} \ +-e DATABRICKS_AWS_ACCOUNT_ID=${DATABRICKS_AWS_ACCOUNT_ID} \ +--show-queries +``` + +Takes its time, again verbose, concludes in: + +``` +2025-02-08 13:24:17,941 - stackql-deploy - INFO - ✅ successfully deleted aws_iam_cross_account_role +2025-02-08 13:24:17,942 - stackql-deploy - INFO - deployment completed in 0:03:21.191788 +🚧 teardown complete (dry run: False) +``` + +Success!!! + +## Optional step: verify destruction with stackql + +Now, let us do some sanity checks and housekeeping with `stackql`. This is purely optional. From the root of this repository: + +``` + +source examples/databricks/all-purpose-cluster/convenience.sh + +stackql shell + +``` + +This will start a `stackql` interactive shell. Here are some commands you can run (I will not place output here): + + +```sql + +registry pull databricks_account v24.12.00279; + +registry pull databricks_workspace v24.12.00279; + + + +select account_id, workspace_name, workspace_id, workspace_status from databricks_account.provisioning.workspaces where account_id = ''; + +``` + +## Cancel databricks subsription + +This is **very** important. + +Go to [the aws Marketplace manage subscriptions page](https://console.aws.amazon.com/marketplace/home#/subscriptions), navigate to databricks and then cancel the subscription. + +## Figures + + +![Create aws databricks subscription](/examples/databricks/all-purpose-cluster/assets/create-aws-databricks-subscription.png) + +**Figure S1**: Create aws databricks subscription. + +--- + +![Awaiting aws databricks subscription resources](/examples/databricks/all-purpose-cluster/assets/awaiting-subscription-resources.png) + +**Figure S2**: Awaiting aws databricks subscription resources. 
+ +--- + +![Auto provisioned workspace](/examples/databricks/all-purpose-cluster/assets/auto-provisioned-worskpace.png) + +**Figure S3**: Auto provisioned workspace. + +--- + +![Capture databricks account id](/examples/databricks/all-purpose-cluster/assets/capture-databricks-account-id.png) + +**Figure U1**: Capture databricks account id. + +--- + +![Capture cross databricks aws account id](/examples/databricks/all-purpose-cluster/assets/capture-cross-databricks-aws-account-id.png) + +**Figure XA1**: Capture cross databricks aws account id. + +--- + +![Create CICD agent](/examples/databricks/all-purpose-cluster/assets/create-cicd-agent.png) + +**Figure S4**: Create CICD agent. + +--- + +![Grant account admin to CICD agent](/examples/databricks/all-purpose-cluster/assets/grant-account-admin-cicd-agent.png) + +**Figure S5**: Grant account admin to CICD agent. + +--- + +![Generate secret for CICD agent](/examples/databricks/all-purpose-cluster/assets/generate-secret-ui.png) + +**Figure S6**: Generate secret for CICD agent. 
+ +--- diff --git a/examples/databricks/all-purpose-cluster/assets/auto-provisioned-worskpace.png b/examples/databricks/all-purpose-cluster/assets/auto-provisioned-worskpace.png new file mode 100644 index 0000000..a9fbcb6 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/auto-provisioned-worskpace.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/awaiting-subscription-resources.png b/examples/databricks/all-purpose-cluster/assets/awaiting-subscription-resources.png new file mode 100644 index 0000000..9505100 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/awaiting-subscription-resources.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/capture-cross-databricks-aws-account-id.png b/examples/databricks/all-purpose-cluster/assets/capture-cross-databricks-aws-account-id.png new file mode 100644 index 0000000..6fdb3c4 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/capture-cross-databricks-aws-account-id.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/capture-databricks-account-id.png b/examples/databricks/all-purpose-cluster/assets/capture-databricks-account-id.png new file mode 100644 index 0000000..c890299 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/capture-databricks-account-id.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/create-aws-databricks-subscription.png b/examples/databricks/all-purpose-cluster/assets/create-aws-databricks-subscription.png new file mode 100644 index 0000000..b5c9e7f Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/create-aws-databricks-subscription.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/create-cicd-agent.png b/examples/databricks/all-purpose-cluster/assets/create-cicd-agent.png new file mode 100644 index 0000000..faf1643 Binary files /dev/null and 
b/examples/databricks/all-purpose-cluster/assets/create-cicd-agent.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/generate-secret-ui.png b/examples/databricks/all-purpose-cluster/assets/generate-secret-ui.png new file mode 100644 index 0000000..daf4f23 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/generate-secret-ui.png differ diff --git a/examples/databricks/all-purpose-cluster/assets/grant-account-admin-cicd-agent.png b/examples/databricks/all-purpose-cluster/assets/grant-account-admin-cicd-agent.png new file mode 100644 index 0000000..f50e0c0 Binary files /dev/null and b/examples/databricks/all-purpose-cluster/assets/grant-account-admin-cicd-agent.png differ diff --git a/examples/databricks/all-purpose-cluster/convenience.sh b/examples/databricks/all-purpose-cluster/convenience.sh new file mode 100644 index 0000000..81f73c7 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/convenience.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +export REPOSITORY_ROOT="$(realpath $CURRENT_DIR/../../..)" + + +if [ -f "${REPOSITORY_ROOT}/examples/databricks/all-purpose-cluster/sec/env.sh" ]; +then + source "${REPOSITORY_ROOT}/examples/databricks/all-purpose-cluster/sec/env.sh" +fi + +if [ "${ASSETS_AWS_REGION}" = "" ]; +then + ASSETS_AWS_REGION='us-east-1' +fi + +if [ "${AWS_ACCOUNT_ID}" = "" ]; +then + echo "AWS_ACCOUNT_ID must be set" >&2 + exit 1 +fi + +if [ "${DATABRICKS_ACCOUNT_ID}" = "" ]; +then + echo "DATABRICKS_ACCOUNT_ID must be set" >&2 + exit 1 +fi + +if [ "${DATABRICKS_AWS_ACCOUNT_ID}" = "" ]; +then + echo "DATABRICKS_AWS_ACCOUNT_ID must be set" >&2 + exit 1 +fi + +if [ "${DATABRICKS_CLIENT_ID}" = "" ]; +then + echo "DATABRICKS_CLIENT_ID must be set" >&2 + exit 1 +fi + +if [ "${DATABRICKS_CLIENT_SECRET}" = "" ]; +then + echo "DATABRICKS_CLIENT_SECRET must be set" >&2 + exit 1 +fi + +if [ "${AWS_SECRET_ACCESS_KEY}" = "" ]; +then + echo 
"AWS_SECRET_ACCESS_KEY must be set" >&2 + exit 1 +fi + +if [ "${AWS_ACCESS_KEY_ID}" = "" ]; +then + echo "AWS_ACCESS_KEY_ID must be set" >&2 + exit 1 +fi + +export ASSETS_AWS_REGION +export AWS_ACCOUNT_ID +export DATABRICKS_ACCOUNT_ID +export DATABRICKS_AWS_ACCOUNT_ID + +export DATABRICKS_CLIENT_ID +export DATABRICKS_CLIENT_SECRET + + +export AWS_SECRET_ACCESS_KEY +export AWS_ACCESS_KEY_ID + + diff --git a/examples/databricks/all-purpose-cluster/resources/aws_get_main_route_table_id.iql b/examples/databricks/all-purpose-cluster/resources/aws_get_main_route_table_id.iql new file mode 100644 index 0000000..72595ff --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_get_main_route_table_id.iql @@ -0,0 +1,6 @@ +/*+ exports, retries=3, retry_delay=5 */ +SELECT +route_table_id +FROM aws.ec2.route_tables +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_iam_cross_account_role.iql b/examples/databricks/all-purpose-cluster/resources/aws_iam_cross_account_role.iql new file mode 100644 index 0000000..f7e8750 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_iam_cross_account_role.iql @@ -0,0 +1,59 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.iam.roles +WHERE data__Identifier = '{{ role_name }}' + +/*+ create */ +INSERT INTO aws.iam.roles ( + RoleName, + Description, + Path, + AssumeRolePolicyDocument, + Policies, + Tags, + region +) +SELECT +'{{ role_name }}', +'{{ description }}', +'{{ path }}', +'{{ assume_role_policy_document }}', +'{{ policies }}', +'{{ global_tags }}', +'us-east-1' + +/*+ update */ +update aws.iam.roles +set data__PatchDocument = string('{{ { + "Description": description, + "Path": path, + "AssumeRolePolicyDocument": assume_role_policy_document, + "Policies": policies, + "Tags": global_tags + } | generate_patch_document }}') +WHERE data__Identifier = '{{ role_name }}' +AND region = 'us-east-1'; + 
+/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM ( + SELECT + max_session_duration, + path, + JSON_EQUAL(assume_role_policy_document, '{{ assume_role_policy_document }}') as test_assume_role_policy_doc, + JSON_EQUAL(policies, '{{ policies }}') as test_policies + FROM aws.iam.roles + WHERE data__Identifier = '{{ role_name }}')t +WHERE test_assume_role_policy_doc = 1 +AND test_policies = 1 +AND path = '{{ path }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT +arn as aws_iam_cross_account_role_arn +FROM aws.iam.roles +WHERE data__Identifier = '{{ role_name }}' + +/*+ delete */ +DELETE FROM aws.iam.roles +WHERE data__Identifier = '{{ role_name }}' +AND region = 'us-east-1' \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket.iql b/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket.iql new file mode 100644 index 0000000..a20c908 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket.iql @@ -0,0 +1,61 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.s3.buckets +WHERE region = '{{ region }}' +AND data__Identifier = '{{ bucket_name }}' + +/*+ create */ +INSERT INTO aws.s3.buckets ( + BucketName, + OwnershipControls, + BucketEncryption, + PublicAccessBlockConfiguration, + VersioningConfiguration, + Tags, + region +) +SELECT + '{{ bucket_name }}', + '{{ ownership_controls }}', + '{{ bucket_encryption }}', + '{{ public_access_block_configuration }}', + '{{ versioning_configuration }}', + '{{ global_tags }}', + '{{ region }}' + +/*+ update */ +update aws.s3.buckets +set data__PatchDocument = string('{{ { + "OwnershipControls": ownership_controls, + "BucketEncryption": bucket_encryption, + "PublicAccessBlockConfiguration": public_access_block_configuration, + "VersioningConfiguration": versioning_configuration, + "Tags": global_tags + } | generate_patch_document }}') +WHERE +region = '{{ region }}' +AND 
data__Identifier = '{{ bucket_name }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM ( + SELECT + JSON_EQUAL(ownership_controls, '{{ ownership_controls }}') as test_ownership_controls, + JSON_EQUAL(bucket_encryption, '{{ bucket_encryption }}') as test_encryption, + JSON_EQUAL(public_access_block_configuration, '{{ public_access_block_configuration }}') as test_public_access_block_configuration, + JSON_EQUAL(versioning_configuration, '{{ versioning_configuration }}') as test_versioning_configuration + FROM aws.s3.buckets + WHERE region = '{{ region }}' + AND data__Identifier = '{{ bucket_name }}' +)t +WHERE test_ownership_controls = 1 +AND test_encryption = 1 +AND test_public_access_block_configuration = 1 +AND test_versioning_configuration = 1 + +/*+ exports, retries=3, retry_delay=5 */ +SELECT +arn as aws_s3_workspace_bucket_arn, +bucket_name as aws_s3_workspace_bucket_name +FROM aws.s3.buckets +WHERE region = '{{ region }}' +AND data__Identifier = '{{ bucket_name }}' diff --git a/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket_policy.iql b/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket_policy.iql new file mode 100644 index 0000000..cead151 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_s3_workspace_bucket_policy.iql @@ -0,0 +1,36 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.s3.bucket_policies +WHERE region = '{{ region }}' +AND bucket = '{{ aws_s3_workspace_bucket_name }}'; + +/*+ create */ +INSERT INTO aws.s3.bucket_policies ( + Bucket, + PolicyDocument, + ClientToken, + region +) +SELECT + '{{ aws_s3_workspace_bucket_name }}', + '{{ policy_document }}', + '{{ uuid() }}', + '{{ region }}' + +/*+ update */ +update aws.s3.bucket_policies +set data__PatchDocument = string('{{ { + "PolicyDocument": policy_document + } | generate_patch_document }}') +WHERE +region = '{{ region }}' +AND data__Identifier = '{{ aws_s3_workspace_bucket_name }}'; + +/*+ 
statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM ( + SELECT + JSON_EQUAL(policy_document, '{{ policy_document }}') as test_policy_document + FROM aws.s3.bucket_policies + WHERE region = '{{ region }}' + AND data__Identifier = '{{ aws_s3_workspace_bucket_name }}')t +WHERE test_policy_document = 1; diff --git a/examples/databricks/all-purpose-cluster/resources/aws_tag_main_vpc_route_table.iql b/examples/databricks/all-purpose-cluster/resources/aws_tag_main_vpc_route_table.iql new file mode 100644 index 0000000..f307613 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_tag_main_vpc_route_table.iql @@ -0,0 +1,7 @@ +/*+ command */ +update aws.ec2.route_tables +set data__PatchDocument = string('{{ { + "Tags": tags + } | generate_patch_document }}') +WHERE region = '{{ region }}' +AND data__Identifier = '{{ route_table_id }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc.iql new file mode 100644 index 0000000..56e1c54 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc.iql @@ -0,0 +1,60 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM +( + SELECT vpc_id, + json_group_object(tag_key, tag_value) as tags + FROM aws.ec2.vpc_tags + WHERE region = '{{ region }}' + AND cidr_block = '{{ cidr_block }}' + GROUP BY vpc_id + HAVING json_extract(tags, '$.Provisioner') = 'stackql' + AND json_extract(tags, '$.StackName') = '{{ stack_name }}' + AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' +) t + +/*+ create */ +INSERT INTO aws.ec2.vpcs ( + CidrBlock, + Tags, + EnableDnsSupport, + EnableDnsHostnames, + ClientToken, + region +) +SELECT + '{{ cidr_block }}', + '{{ tags }}', + true, + true, + '{{ idempotency_token }}', + '{{ region }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( + SELECT vpc_id, + json_group_object(tag_key, tag_value) as tags + FROM 
aws.ec2.vpc_tags + WHERE region = '{{ region }}' + AND cidr_block = '{{ cidr_block }}' + GROUP BY vpc_id + HAVING json_extract(tags, '$.Provisioner') = 'stackql' + AND json_extract(tags, '$.StackName') = '{{ stack_name }}' + AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' +) t + +/*+ exports, retries=3, retry_delay=5 */ +SELECT vpc_id, +json_group_object(tag_key, tag_value) as tags +FROM aws.ec2.vpc_tags +WHERE region = '{{ region }}' +AND cidr_block = '{{ cidr_block }}' +GROUP BY vpc_id +HAVING json_extract(tags, '$.Provisioner') = 'stackql' +AND json_extract(tags, '$.StackName') = '{{ stack_name }}' +AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' + +/*+ delete */ +DELETE FROM aws.ec2.vpcs +WHERE data__Identifier = '{{ vpc_id}}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gateway.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gateway.iql new file mode 100644 index 0000000..dc42032 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gateway.iql @@ -0,0 +1,54 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM +( +SELECT internet_gateway_id, +json_group_object(tag_key, tag_value) as tags +FROM aws.ec2.internet_gateway_tags +WHERE region = '{{ region }}' +GROUP BY internet_gateway_id +HAVING json_extract(tags, '$.Provisioner') = 'stackql' +AND json_extract(tags, '$.StackName') = '{{ stack_name }}' +AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' +) t + +/*+ create */ +INSERT INTO aws.ec2.internet_gateways ( + Tags, + ClientToken, + region +) +SELECT +'{{ tags }}', +'{{ idempotency_token }}', +'{{ region }}'; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT internet_gateway_id, +json_group_object(tag_key, tag_value) as tags +FROM aws.ec2.internet_gateway_tags +WHERE region = '{{ region }}' +GROUP BY internet_gateway_id +HAVING json_extract(tags, '$.Provisioner') = 
'stackql' +AND json_extract(tags, '$.StackName') = '{{ stack_name }}' +AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' +) t + +/*+ exports, retries=3, retry_delay=5 */ +SELECT internet_gateway_id FROM +( +SELECT internet_gateway_id, +json_group_object(tag_key, tag_value) as tags +FROM aws.ec2.internet_gateway_tags +WHERE region = '{{ region }}' +GROUP BY internet_gateway_id +HAVING json_extract(tags, '$.Provisioner') = 'stackql' +AND json_extract(tags, '$.StackName') = '{{ stack_name }}' +AND json_extract(tags, '$.StackEnv') = '{{ stack_env }}' +) t; + +/*+ delete */ +DELETE FROM aws.ec2.internet_gateways +WHERE data__Identifier = '{{ internet_gateway_id }}' +AND region = '{{ region }}'; diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gw_attachment.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gw_attachment.iql new file mode 100644 index 0000000..31b9d25 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_gw_attachment.iql @@ -0,0 +1,39 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM +( +SELECT +attachment_type, +vpc_id +FROM aws.ec2.vpc_gateway_attachments +WHERE region = '{{ region }}' +AND internet_gateway_id = '{{ internet_gateway_id }}' +AND vpc_id = '{{ vpc_id }}' +) t + +/*+ create */ +INSERT INTO aws.ec2.vpc_gateway_attachments ( + InternetGatewayId, + VpcId, + region +) +SELECT + '{{ internet_gateway_id }}', + '{{ vpc_id }}', + '{{ region }}'; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +attachment_type, +vpc_id +FROM aws.ec2.vpc_gateway_attachments +WHERE region = '{{ region }}' +AND internet_gateway_id = '{{ internet_gateway_id }}' +AND vpc_id = '{{ vpc_id }}' +) t + +/*+ delete */ +DELETE FROM aws.ec2.vpc_gateway_attachments +WHERE data__Identifier = 'IGW|{{ vpc_id }}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_route.iql 
b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_route.iql new file mode 100644 index 0000000..b46cc0f --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_inet_route.iql @@ -0,0 +1,41 @@ +/*+ exists */ +SELECT COUNT(*) as count FROM +( +SELECT data__Identifier +FROM aws.ec2.routes +WHERE region = '{{ region }}' +AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' +) t + +/*+ create */ +INSERT INTO aws.ec2.routes ( + DestinationCidrBlock, + GatewayId, + RouteTableId, + region +) +SELECT + '0.0.0.0/0', + '{{ internet_gateway_id }}', + '{{ route_table_id }}', + '{{ region }}'; + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT data__Identifier +FROM aws.ec2.routes +WHERE region = '{{ region }}' +AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0' +) t + +/*+ exports, retries=3, retry_delay=5 */ +SELECT data__Identifier as inet_route_indentifer +FROM aws.ec2.routes +WHERE region = '{{ region }}' +AND data__Identifier = '{{ route_table_id }}|0.0.0.0/0'; + +/*+ delete */ +DELETE FROM aws.ec2.routes +WHERE data__Identifier = '{{ inet_route_indentifer }}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group.iql new file mode 100644 index 0000000..15e9061 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group.iql @@ -0,0 +1,41 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.ec2.security_groups +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND group_name = '{{ group_name }}' + +/*+ create */ +INSERT INTO aws.ec2.security_groups ( + GroupName, + GroupDescription, + VpcId, + Tags, + region +) +SELECT + '{{ group_name }}', + '{{ group_description }}', + '{{ vpc_id }}', + '{{ tags }}', + '{{ region }}'; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) 
as count +FROM aws.ec2.security_groups +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND group_name = '{{ group_name }}' +AND group_description = '{{ group_description }}' + +/*+ exports, retries=3, retry_delay=5 */ +SELECT group_id as security_group_id +FROM aws.ec2.security_groups +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND group_name = '{{ group_name }}' + +/*+ delete */ +DELETE FROM aws.ec2.security_groups +WHERE data__Identifier = '{{ security_group_id }}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group_rules.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group_rules.iql new file mode 100644 index 0000000..62f79eb --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_security_group_rules.iql @@ -0,0 +1,27 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.ec2.security_groups +WHERE region = '{{ region }}' +AND data__Identifier = '{{ security_group_id }}' + +/*+ createorupdate */ +update aws.ec2.security_groups +set data__PatchDocument = string('{{ { + "SecurityGroupIngress": security_group_ingress, + "SecurityGroupEgress": security_group_egress + } | generate_patch_document }}') +WHERE region = '{{ region }}' +AND data__Identifier = '{{ security_group_id }}' + +/*+ statecheck, retries=5, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( + SELECT + JSON_EQUAL(security_group_ingress, '{{ security_group_ingress }}') as ingress_test, + JSON_EQUAL(security_group_egress, '{{ security_group_egress }}') as egress_test + FROM aws.ec2.security_groups + WHERE region = '{{ region }}' + AND data__Identifier = '{{ security_group_id }}' + AND ingress_test = 1 + AND egress_test = 1 +) t; diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet.iql new file mode 100644 index 0000000..83667f5 --- 
/dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet.iql @@ -0,0 +1,43 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM aws.ec2.subnets +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND cidr_block = '{{ cidr_block }}' + +/*+ create */ +INSERT INTO aws.ec2.subnets ( + VpcId, + CidrBlock, + AvailabilityZone, + MapPublicIpOnLaunch, + Tags, + region +) +SELECT + '{{ vpc_id }}', + '{{ cidr_block }}', + '{{ availability_zone }}', + false, + '{{ tags }}', + '{{ region }}'; + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count +FROM aws.ec2.subnets +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND cidr_block = '{{ cidr_block }}' +AND availability_zone = '{{ availability_zone }}'; + +/*+ exports, retries=3, retry_delay=5 */ +SELECT subnet_id +FROM aws.ec2.subnets +WHERE region = '{{ region }}' +AND vpc_id = '{{ vpc_id }}' +AND cidr_block = '{{ cidr_block }}'; + +/*+ delete */ +DELETE FROM aws.ec2.subnets +WHERE data__Identifier = '{{ subnet_id }}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet_rt_assn.iql b/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet_rt_assn.iql new file mode 100644 index 0000000..d0c8b33 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/aws_vpc_subnet_rt_assn.iql @@ -0,0 +1,34 @@ +/*+ exists */ +select regexp_like(associationSet, '.*{{ subnet_id }}.*') as count from +aws.ec2_native.route_tables where region = '{{ region }}' +and routeTableId = '{{ route_table_id }}' + +/*+ create */ +INSERT INTO aws.ec2.subnet_route_table_associations ( + RouteTableId, + SubnetId, + ClientToken, + region +) +SELECT + '{{ route_table_id }}', + '{{ subnet_id }}', + '{{ idempotency_token }}', + '{{ region }}'; + +/*+ statecheck, retries=5, retry_delay=5 */ +select regexp_like(associationSet, '.*{{ subnet_id }}.*') as count from +aws.ec2_native.route_tables where region = 
'{{ region }}' +and routeTableId = '{{ route_table_id }}' + +/*+ exports, retries=5, retry_delay=5 */ +SELECT id as route_table_assn_id +FROM aws.ec2.subnet_route_table_associations +WHERE region = '{{ region }}' +AND route_table_id = '{{ route_table_id }}' +AND subnet_id = '{{ subnet_id }}'; + +/*+ delete */ +DELETE FROM aws.ec2.subnet_route_table_associations +WHERE data__Identifier = '{{ route_table_assn_id }}' +AND region = '{{ region }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/databricks_credentials.iql b/examples/databricks/all-purpose-cluster/resources/databricks_credentials.iql new file mode 100644 index 0000000..d83abc6 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/databricks_credentials.iql @@ -0,0 +1,39 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.credentials +WHERE account_id = '{{ databricks_account_id }}' +AND credentials_name = '{{ credentials_name }}' + +/*+ create */ +INSERT INTO databricks_account.provisioning.credentials ( +account_id, +data__credentials_name, +data__aws_credentials +) +SELECT +'{{ databricks_account_id }}', +'{{ credentials_name }}', +'{{ aws_credentials }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +credentials_id +FROM databricks_account.provisioning.credentials +WHERE account_id = '{{ databricks_account_id }}' +AND credentials_name = '{{ credentials_name }}' +AND JSON_EXTRACT(aws_credentials, '$.sts_role.role_arn') = '{{ aws_iam_cross_account_role_arn }}' +) t + +/*+ exports */ +SELECT credentials_id as databricks_credentials_id, +JSON_EXTRACT(aws_credentials, '$.sts_role.external_id') as databricks_role_external_id +FROM databricks_account.provisioning.credentials +WHERE account_id = '{{ databricks_account_id }}' +AND credentials_name = '{{ credentials_name }}' + +/*+ delete */ +DELETE FROM databricks_account.provisioning.credentials +WHERE account_id = '{{ 
databricks_account_id }}' AND +credentials_id = '{{ databricks_credentials_id }}'; \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/databricks_network.iql b/examples/databricks/all-purpose-cluster/resources/databricks_network.iql new file mode 100644 index 0000000..45e0b0a --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/databricks_network.iql @@ -0,0 +1,46 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.networks +WHERE account_id = '{{ databricks_account_id }}' +AND network_name = '{{ databricks_network_name }}' + +/*+ create */ +INSERT INTO databricks_account.provisioning.networks ( +account_id, +data__network_name, +data__vpc_id, +data__subnet_ids, +data__security_group_ids +) +SELECT +'{{ databricks_account_id }}', +'{{ databricks_network_name }}', +'{{ vpc_id }}', +'{{ subnet_ids }}', +'{{ security_group_ids }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count FROM +( +SELECT +JSON_EQUAL(subnet_ids, '{{ subnet_ids }}') as subnet_test, +JSON_EQUAL(security_group_ids, '{{ security_group_ids }}') as sg_test +FROM databricks_account.provisioning.networks +WHERE account_id = '{{ databricks_account_id }}' +AND network_name = '{{ databricks_network_name }}' +AND vpc_id = '{{ vpc_id }}' +AND subnet_test = 1 +AND sg_test = 1 +)t + +/*+ exports */ +SELECT +network_id as databricks_network_id +FROM databricks_account.provisioning.networks +WHERE account_id = '{{ databricks_account_id }}' AND +network_name = '{{ databricks_network_name }}' + +/*+ delete */ +DELETE FROM databricks_account.provisioning.networks +WHERE account_id = '{{ databricks_account_id }}' AND +network_id = '{{ databricks_network_id }}' \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/databricks_storage_configuration.iql b/examples/databricks/all-purpose-cluster/resources/databricks_storage_configuration.iql new file mode 100644 index 
0000000..4e60cfc --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/databricks_storage_configuration.iql @@ -0,0 +1,35 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.storage +WHERE account_id = '{{ databricks_account_id }}' +AND storage_configuration_name = '{{ storage_configuration_name }}' + +/*+ create */ +INSERT INTO databricks_account.provisioning.storage ( +account_id, +data__storage_configuration_name, +data__root_bucket_info +) +SELECT +'{{ databricks_account_id }}', +'{{ storage_configuration_name }}', +'{{ root_bucket_info }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.storage +WHERE account_id = '{{ databricks_account_id }}' +AND storage_configuration_name = '{{ storage_configuration_name }}' +AND JSON_EXTRACT(root_bucket_info, '$.bucket_name') = '{{ aws_s3_workspace_bucket_name }}' + +/*+ exports */ +SELECT +storage_configuration_id as databricks_storage_configuration_id +FROM databricks_account.provisioning.storage +WHERE account_id = '{{ databricks_account_id }}' +AND storage_configuration_name = '{{ storage_configuration_name }}' + +/*+ delete */ +DELETE FROM databricks_account.provisioning.storage +WHERE account_id = '{{ databricks_account_id }}' AND +storage_configuration_id = '{{ databricks_storage_configuration_id }}' \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/resources/databricks_workspace.iql b/examples/databricks/all-purpose-cluster/resources/databricks_workspace.iql new file mode 100644 index 0000000..8ba84f7 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/resources/databricks_workspace.iql @@ -0,0 +1,46 @@ +/*+ exists */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ databricks_account_id }}' +AND workspace_name = '{{ workspace_name }}' + +/*+ create */ +INSERT INTO databricks_account.provisioning.workspaces ( 
+account_id, +data__workspace_name, +data__network_id, +data__aws_region, +data__credentials_id, +data__storage_configuration_id, +data__pricing_tier +) +SELECT +'{{ databricks_account_id }}', +'{{ workspace_name }}', +'{{ network_id }}', +'{{ aws_region }}', +'{{ credentials_id }}', +'{{ storage_configuration_id }}', +'{{ pricing_tier }}' + +/*+ statecheck, retries=3, retry_delay=5 */ +SELECT COUNT(*) as count +FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ databricks_account_id }}' +AND workspace_name = '{{ workspace_name }}' +AND network_id = '{{ network_id }}' +AND aws_region = '{{ aws_region }}' +AND credentials_id = '{{ credentials_id }}' +AND storage_configuration_id = '{{ storage_configuration_id }}' +AND pricing_tier = '{{ pricing_tier }}' + +/*+ exports */ +SELECT workspace_id AS databricks_workspace_id +FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ databricks_account_id }}' +AND workspace_name = '{{ workspace_name }}' + +/*+ delete */ +DELETE FROM databricks_account.provisioning.workspaces +WHERE account_id = '{{ databricks_account_id }}' AND +workspace_id = '{{ databricks_workspace_id }}' \ No newline at end of file diff --git a/examples/databricks/all-purpose-cluster/sec/.gitignore b/examples/databricks/all-purpose-cluster/sec/.gitignore new file mode 100644 index 0000000..d6b7ef3 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/sec/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/examples/databricks/all-purpose-cluster/stackql_manifest.yml b/examples/databricks/all-purpose-cluster/stackql_manifest.yml new file mode 100644 index 0000000..4d1d463 --- /dev/null +++ b/examples/databricks/all-purpose-cluster/stackql_manifest.yml @@ -0,0 +1,394 @@ +version: 1 +name: "databricks-all-purpose-cluster" +description: creates a databricks workspace and all-purpose cluster +providers: + - aws + - databricks_account + - databricks_workspace +globals: + - name: databricks_account_id + description: 
databricks account id + value: "{{ DATABRICKS_ACCOUNT_ID }}" + - name: databricks_aws_account_id + description: databricks AWS account id + value: "{{ DATABRICKS_AWS_ACCOUNT_ID }}" + - name: aws_account + description: aws_account id + value: "{{ AWS_ACCOUNT_ID }}" + - name: region + description: aws region + value: "{{ AWS_REGION }}" + - name: global_tags + value: + - Key: Provisioner + Value: stackql + - Key: StackName + Value: "{{ stack_name }}" + - Key: StackEnv + Value: "{{ stack_env }}" +resources: +# ==================================================================================== +# AWS IAM +# ==================================================================================== + - name: aws_iam_cross_account_role + props: + - name: role_name + value: "{{ stack_name }}-{{ stack_env }}-role" + - name: assume_role_policy_document + value: + Version: "2012-10-17" + Statement: + - Sid: "" + Effect: "Allow" + Principal: + AWS: "arn:aws:iam::{{ databricks_aws_account_id }}:root" + Action: "sts:AssumeRole" + Condition: + StringEquals: + sts:ExternalId: "{{ databricks_account_id }}" + - name: description + value: 'allows Databricks to access resources in ({{ stack_name }}-{{ stack_env }})' + - name: path + value: '/' + - name: policies + value: + - PolicyDocument: + Statement: + - Sid: Stmt1403287045000 + Effect: Allow + Action: + - "ec2:AllocateAddress" + - "ec2:AssociateDhcpOptions" + - "ec2:AssociateIamInstanceProfile" + - "ec2:AssociateRouteTable" + - "ec2:AttachInternetGateway" + - "ec2:AttachVolume" + - "ec2:AuthorizeSecurityGroupEgress" + - "ec2:AuthorizeSecurityGroupIngress" + - "ec2:CancelSpotInstanceRequests" + - "ec2:CreateDhcpOptions" + - "ec2:CreateInternetGateway" + - "ec2:CreateKeyPair" + - "ec2:CreateNatGateway" + - "ec2:CreatePlacementGroup" + - "ec2:CreateRoute" + - "ec2:CreateRouteTable" + - "ec2:CreateSecurityGroup" + - "ec2:CreateSubnet" + - "ec2:CreateTags" + - "ec2:CreateVolume" + - "ec2:CreateVpc" + - "ec2:CreateVpcEndpoint" + - 
"ec2:DeleteDhcpOptions" + - "ec2:DeleteInternetGateway" + - "ec2:DeleteKeyPair" + - "ec2:DeleteNatGateway" + - "ec2:DeletePlacementGroup" + - "ec2:DeleteRoute" + - "ec2:DeleteRouteTable" + - "ec2:DeleteSecurityGroup" + - "ec2:DeleteSubnet" + - "ec2:DeleteTags" + - "ec2:DeleteVolume" + - "ec2:DeleteVpc" + - "ec2:DeleteVpcEndpoints" + - "ec2:DescribeAvailabilityZones" + - "ec2:DescribeIamInstanceProfileAssociations" + - "ec2:DescribeInstanceStatus" + - "ec2:DescribeInstances" + - "ec2:DescribeInternetGateways" + - "ec2:DescribeNatGateways" + - "ec2:DescribePlacementGroups" + - "ec2:DescribePrefixLists" + - "ec2:DescribeReservedInstancesOfferings" + - "ec2:DescribeRouteTables" + - "ec2:DescribeSecurityGroups" + - "ec2:DescribeSpotInstanceRequests" + - "ec2:DescribeSpotPriceHistory" + - "ec2:DescribeSubnets" + - "ec2:DescribeVolumes" + - "ec2:DescribeVpcs" + - "ec2:DescribeVpcAttribute" + - "ec2:DescribeNetworkAcls" + - "ec2:DetachInternetGateway" + - "ec2:DisassociateIamInstanceProfile" + - "ec2:DisassociateRouteTable" + - "ec2:ModifyVpcAttribute" + - "ec2:ReleaseAddress" + - "ec2:ReplaceIamInstanceProfileAssociation" + - "ec2:ReplaceRoute" + - "ec2:RequestSpotInstances" + - "ec2:RevokeSecurityGroupEgress" + - "ec2:RevokeSecurityGroupIngress" + - "ec2:RunInstances" + - "ec2:TerminateInstances" + Resource: + - "*" + - Effect: Allow + Action: + - "iam:CreateServiceLinkedRole" + - "iam:PutRolePolicy" + Resource: + - arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot + Condition: + StringLike: + "iam:AWSServiceName": spot.amazonaws.com + Version: '2012-10-17' + PolicyName: "{{ stack_name }}-{{ stack_env }}-policy" + exports: + - aws_iam_cross_account_role_arn + - name: databricks_credentials + props: + - name: credentials_name + value: "{{ stack_name }}-{{ stack_env }}-credentials" + - name: aws_credentials + value: + sts_role: + role_arn: "{{ aws_iam_cross_account_role_arn }}" + exports: + - databricks_credentials_id + - 
databricks_role_external_id +# ==================================================================================== +# AWS VPC Networking +# ==================================================================================== + - name: aws_vpc + props: + - name: cidr_block + values: + prd: + value: "10.0.0.0/16" + sit: + value: "10.1.0.0/16" + dev: + value: "10.2.0.0/16" + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-vpc" + merge: + - global_tags + - name: idempotency_token + value: 019447a0-b84a-7b7f-bca5-2ee320207e51 + exports: + - vpc_id + - name: aws_get_main_route_table_id + type: query + exports: + - route_table_id + - name: aws_tag_main_vpc_route_table + type: command + props: + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-route-table" + merge: ['global_tags'] + - name: aws_vpc_subnet1 + file: aws_vpc_subnet.iql + props: + - name: availability_zone + value: "us-east-1a" + - name: cidr_block + values: + prd: + value: "10.0.0.0/19" + sit: + value: "10.1.0.0/19" + dev: + value: "10.2.0.0/19" + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-subnet-1" + merge: + - global_tags + exports: + - subnet_id: aws_vpc_subnet1_id + - name: aws_vpc_subnet2 + file: aws_vpc_subnet.iql + props: + - name: availability_zone + value: "us-east-1b" + - name: cidr_block + values: + prd: + value: "10.0.32.0/19" + sit: + value: "10.1.32.0/19" + dev: + value: "10.2.32.0/19" + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-subnet-2" + merge: + - global_tags + exports: + - subnet_id: aws_vpc_subnet2_id + - name: aws_vpc_inet_gateway + props: + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-inet-gateway" + merge: ['global_tags'] + - name: idempotency_token + value: 019447a5-f076-75f8-9173-092df5a66d35 + exports: + - internet_gateway_id + - name: aws_vpc_inet_gw_attachment + props: [] + - name: aws_vpc_subnet_rt_assn1 + file: 
aws_vpc_subnet_rt_assn.iql + props: + - name: subnet_id + value: "{{ aws_vpc_subnet1_id }}" + - name: idempotency_token + value: 019447aa-1c7a-775b-91dc-04db7c49f4a7 + exports: + - route_table_assn_id: aws_vpc_subnet1_rt_assn_id + - name: aws_vpc_subnet_rt_assn2 + file: aws_vpc_subnet_rt_assn.iql + props: + - name: subnet_id + value: "{{ aws_vpc_subnet2_id }}" + - name: idempotency_token + value: 019447ab-1302-754a-a580-99071f1ad814 + exports: + - route_table_assn_id: aws_vpc_subnet2_rt_assn_id + - name: aws_vpc_inet_route + props: [] + exports: + - inet_route_indentifer + - name: aws_vpc_security_group + props: + - name: group_name + value: "{{ stack_name }}-{{ stack_env }}-sg" + - name: group_description + value: "security group for {{ stack_name }} ({{ stack_env }} environment)" + - name: tags + value: + - Key: Name + Value: "{{ stack_name }}-{{ stack_env }}-sg" + merge: ['global_tags'] + exports: + - security_group_id + - name: aws_vpc_security_group_rules + props: + - name: security_group_ingress + value: + - FromPort: 1025 + ToPort: 65535 + SourceSecurityGroupOwnerId: "{{ aws_account }}" + IpProtocol: tcp + SourceSecurityGroupId: "{{ security_group_id }}" + - FromPort: 1025 + ToPort: 65535 + SourceSecurityGroupOwnerId: "{{ aws_account }}" + IpProtocol: udp + SourceSecurityGroupId: "{{ security_group_id }}" + - CidrIp: "3.237.73.224/28" + FromPort: 443 + ToPort: 443 + IpProtocol: "tcp" + - CidrIp: "54.156.226.103/32" + FromPort: 443 + ToPort: 443 + IpProtocol: "tcp" + - name: security_group_egress + value: + - CidrIp: "0.0.0.0/0" + Description: Allow all outbound traffic + FromPort: -1 + ToPort: -1 + IpProtocol: "-1" + - name: databricks_network + props: + - name: databricks_network_name + value: "{{ stack_name }}-{{ stack_env }}-network" + - name: subnet_ids + value: + - "{{ aws_vpc_subnet1_id }}" + - "{{ aws_vpc_subnet2_id }}" + - name: security_group_ids + value: + - "{{ security_group_id }}" + exports: + - databricks_network_id +# 
==================================================================================== +# AWS Storage +# ==================================================================================== + - name: aws_s3_workspace_bucket + props: + - name: bucket_name + value: "{{ stack_name }}-{{ stack_env }}-root-bucket" + - name: ownership_controls + value: + Rules: + - ObjectOwnership: "BucketOwnerPreferred" + - name: bucket_encryption + value: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: true + ServerSideEncryptionByDefault: + SSEAlgorithm: "AES256" + - name: public_access_block_configuration + value: + BlockPublicAcls: true + IgnorePublicAcls: true + BlockPublicPolicy: true + RestrictPublicBuckets: true + - name: versioning_configuration + value: + Status: "Suspended" + exports: + - aws_s3_workspace_bucket_name + - aws_s3_workspace_bucket_arn + - name: aws_s3_workspace_bucket_policy + props: + - name: policy_document + value: + Version: "2012-10-17" + Statement: + - Sid: Grant Databricks Access + Effect: Allow + Principal: + AWS: "arn:aws:iam::{{ databricks_aws_account_id }}:root" + Action: + - "s3:GetObject" + - "s3:GetObjectVersion" + - "s3:PutObject" + - "s3:DeleteObject" + - "s3:ListBucket" + - "s3:GetBucketLocation" + Resource: + - "{{ aws_s3_workspace_bucket_arn }}/*" + - "{{ aws_s3_workspace_bucket_arn }}" + - name: databricks_storage_configuration + props: + - name: storage_configuration_name + value: "{{ stack_name }}-{{ stack_env }}-storage" + - name: root_bucket_info + value: + bucket_name: "{{ aws_s3_workspace_bucket_name }}" + exports: + - databricks_storage_configuration_id +# ==================================================================================== +# DBX Workspace +# ==================================================================================== + - name: databricks_workspace + props: + - name: workspace_name + value: "{{ stack_name }}-{{ stack_env }}-workspace" + - name: network_id + value: "{{ databricks_network_id }}" + - 
name: aws_region + value: "{{ region }}" + - name: credentials_id + value: "{{ databricks_credentials_id }}" + - name: storage_configuration_id + value: "{{ databricks_storage_configuration_id }}" + - name: pricing_tier + value: PREMIUM + exports: + - databricks_workspace_id diff --git a/requirements.txt b/requirements.txt index f1297a8..ea24142 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ click -python-dotenv jinja2 +python-dotenv pystackql>=3.6.1 +setuptools==75.8.0 diff --git a/setup.py b/setup.py index 6b086b8..7ce9d62 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ setup( name='stackql-deploy', - version='1.8.0', + version='1.8.3', description='Model driven resource provisioning and deployment framework using StackQL.', long_description=readme, long_description_content_type='text/x-rst', diff --git a/stackql_deploy/__init__.py b/stackql_deploy/__init__.py index ce77fa0..5369601 100644 --- a/stackql_deploy/__init__.py +++ b/stackql_deploy/__init__.py @@ -1 +1 @@ -__version__ = '1.8.0' +__version__ = '1.8.3' diff --git a/stackql_deploy/cli.py b/stackql_deploy/cli.py index b3b450a..be2b275 100644 --- a/stackql_deploy/cli.py +++ b/stackql_deploy/cli.py @@ -115,20 +115,27 @@ def add_common_options(command): command = option(command) return command +def add_stackql_kwarg_options(command): + """Add options that become kwargs for StackQL initialization.""" + stackql_options = [ + click.option('--custom-registry', default=None, + help='custom registry URL for StackQL.'), + click.option('--download-dir', default=None, + help='download directory for StackQL.') + ] + for option in stackql_options: + command = option(command) + return command + # # main entry point # @click.group() -@click.option('--custom-registry', default=None, help='custom registry URL for StackQL.') -@click.option('--download-dir', default=None, help='download directory for StackQL.') @click.pass_context -def cli(ctx, custom_registry, download_dir): +def cli(ctx): 
"""This is the main CLI entry point.""" ctx.ensure_object(dict) - ctx.obj['custom_registry'] = custom_registry - ctx.obj['download_dir'] = download_dir - def setup_command_context( ctx, @@ -140,15 +147,17 @@ def setup_command_context( dry_run, show_queries, on_failure, + custom_registry, + download_dir, command_name ): """Common initialization for commands.""" # Initialize the logger setup_logger(command_name, locals()) + # Initialize the StackQL instance and environment variables - stackql = get_stackql_instance( - ctx.obj['custom_registry'], ctx.obj['download_dir'] - ) + stackql = get_stackql_instance(custom_registry, download_dir) + # Load environment variables from the file and apply overrides env_vars = load_env_vars(env_file, env) return stackql, env_vars @@ -162,13 +171,15 @@ def setup_command_context( @click.argument('stack_dir') @click.argument('stack_env') @add_common_options +@add_stackql_kwarg_options @click.pass_context def build(ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, show_queries, on_failure): + env, dry_run, show_queries, on_failure, + custom_registry, download_dir ): """Create or update resources.""" stackql, env_vars = setup_command_context( ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, show_queries, on_failure, 'build' + env, dry_run, show_queries, on_failure, custom_registry, download_dir, 'build' ) provisioner = StackQLProvisioner( stackql, env_vars, logger, stack_dir, stack_env) @@ -192,13 +203,15 @@ def build(ctx, stack_dir, stack_env, log_level, env_file, @click.argument('stack_dir') @click.argument('stack_env') @add_common_options +@add_stackql_kwarg_options @click.pass_context def teardown(ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, show_queries, on_failure): + env, dry_run, show_queries, on_failure, + custom_registry, download_dir ): """Teardown a provisioned stack.""" stackql, env_vars = setup_command_context( ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, 
show_queries, on_failure, 'teardown' + env, dry_run, show_queries, on_failure, custom_registry, download_dir, 'teardown' ) deprovisioner = StackQLDeProvisioner( stackql, env_vars, logger, stack_dir, stack_env) @@ -222,13 +235,14 @@ def teardown(ctx, stack_dir, stack_env, log_level, env_file, @click.argument('stack_dir') @click.argument('stack_env') @add_common_options +@add_stackql_kwarg_options @click.pass_context def test(ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, show_queries, on_failure): + env, dry_run, show_queries, on_failure, custom_registry, download_dir): """Run test queries for the stack.""" stackql, env_vars = setup_command_context( ctx, stack_dir, stack_env, log_level, env_file, - env, dry_run, show_queries, on_failure, 'test' + env, dry_run, show_queries, on_failure, custom_registry, download_dir, 'test' ) test_runner = StackQLTestRunner( stackql, env_vars, logger, stack_dir, stack_env) diff --git a/stackql_deploy/cmd/base.py b/stackql_deploy/cmd/base.py index ece348f..6b38893 100644 --- a/stackql_deploy/cmd/base.py +++ b/stackql_deploy/cmd/base.py @@ -90,7 +90,7 @@ def process_exports( # item is the key to be exported for item in expected_exports: export_data[item] = "" - export_vars(self, resource, export_data, expected_exports, protected_exports) + export_vars(self, resource, export_data, expected_exports, all_dicts, protected_exports) self.logger.info( f"📦 dry run exports query for [{resource['name']}]:\n\n/* exports query */\n{exports_query}\n" ) @@ -114,6 +114,7 @@ def process_exports( if ignore_missing_exports: return else: + show_query(True, exports_query, self.logger) catch_error_and_exit(f"exports query failed for {resource['name']}", self.logger) if len(exports) > 1: @@ -334,3 +335,20 @@ def delete_resource( self.logger.debug(f"delete response: {msg}") else: self.logger.info(f"delete query not configured for [{resource['name']}], skipping delete...") + + def run_command(self, command_query, command_retries, 
command_retry_delay, dry_run, show_queries): + if command_query: + if dry_run: + self.logger.info(f"🚧 dry run command:\n\n{command_query}\n") + else: + self.logger.info("🚧 running command...") + show_query(show_queries, command_query, self.logger) + run_stackql_command( + command_query, + self.stackql, + self.logger, + retries=command_retries, + retry_delay=command_retry_delay + ) + else: + self.logger.info("command query not configured, skipping command...") diff --git a/stackql_deploy/cmd/build.py b/stackql_deploy/cmd/build.py index d5fe343..1851ce1 100644 --- a/stackql_deploy/cmd/build.py +++ b/stackql_deploy/cmd/build.py @@ -1,3 +1,4 @@ +import datetime from ..lib.utils import ( catch_error_and_exit, export_vars, @@ -33,6 +34,8 @@ def process_script_resource(self, resource, dry_run, full_context): def run(self, dry_run, show_queries, on_failure): + start_time = datetime.datetime.now() + self.logger.info( f"deploying [{self.stack_name}] in [{self.stack_env}] environment {'(dry run)' if dry_run else ''}" ) @@ -60,9 +63,8 @@ def run(self, dry_run, show_queries, on_failure): # createorupdate queries supercede create and update queries createorupdate_query = resource_queries.get('createorupdate', {}).get('rendered') createorupdate_retries = resource_queries.get('createorupdate', {}).get('options', {}).get('retries', 1) - createorupdate_retry_delay = resource_queries.get('createorupdate', {}).get('options', {}).get( - 'retry_delay', 0 - ) + createorupdate_retry_delay = resource_queries.get( + 'createorupdate', {}).get('options', {}).get('retry_delay', 0) if not createorupdate_query: create_query = resource_queries.get('create', {}).get('rendered') @@ -71,9 +73,7 @@ def run(self, dry_run, show_queries, on_failure): update_query = resource_queries.get('update', {}).get('rendered') update_retries = resource_queries.get('update', {}).get('options', {}).get('retries', 1) - update_retry_delay = resource_queries.get('update', {}).get('options', {}).get( - 'retry_delay', 0 - 
) + update_retry_delay = resource_queries.get('update', {}).get('options', {}).get('retry_delay', 0) else: create_query = createorupdate_query create_retries = createorupdate_retries @@ -114,37 +114,52 @@ def run(self, dry_run, show_queries, on_failure): ignore_errors = True # - # run exists check (check if resource exists) + # exists check # - resource_exists = self.check_if_resource_exists( - resource_exists, - resource, - full_context, - exists_query, - exists_retries, - exists_retry_delay, - dry_run, - show_queries - ) - - # - # initial state check (if resource exists) - # - if resource_exists: - is_correct_state = self.check_if_resource_is_correct_state( - is_correct_state, - resource, - full_context, - statecheck_query, - statecheck_retries, - statecheck_retry_delay, - dry_run, - show_queries - ) - - # if exists and correct state, skip deploy - if resource_exists and is_correct_state: - self.logger.info(f"skipping create or update for {resource['name']}...") + if createorupdate_query: + pass + else: + if exists_query: + resource_exists = self.check_if_resource_exists( + resource_exists, + resource, + full_context, + exists_query, + exists_retries, + exists_retry_delay, + dry_run, + show_queries + ) + elif statecheck_query: + # statecheck can be used as an exists check + is_correct_state = self.check_if_resource_is_correct_state( + is_correct_state, + resource, + full_context, + statecheck_query, + statecheck_retries, + statecheck_retry_delay, + dry_run, + show_queries + ) + resource_exists = is_correct_state + else: + catch_error_and_exit("iql file must include a 'statecheck' anchor.", self.logger) + + # + # state check + # + if resource_exists and not is_correct_state: + is_correct_state = self.check_if_resource_is_correct_state( + is_correct_state, + resource, + full_context, + statecheck_query, + statecheck_retries, + statecheck_retry_delay, + dry_run, + show_queries + ) # # resource does not exist @@ -204,6 +219,12 @@ def run(self, dry_run, show_queries, 
on_failure): self.logger ) + if type == 'command': + # command queries + command_query = resource_queries.get('command', {}).get('rendered') + command_retries = resource_queries.get('command', {}).get('options', {}).get('retries', 1) + command_retry_delay = resource_queries.get('command', {}).get('options', {}).get('retry_delay', 0) + self.run_command(command_query, command_retries, command_retry_delay, dry_run, show_queries) # # exports # @@ -223,3 +244,6 @@ def run(self, dry_run, show_queries, on_failure): self.logger.info(f"✅ successfully deployed {resource['name']}") elif type == 'query': self.logger.info(f"✅ successfully exported variables for query in {resource['name']}") + + elapsed_time = datetime.datetime.now() - start_time + self.logger.info(f"deployment completed in {elapsed_time}") diff --git a/stackql_deploy/cmd/teardown.py b/stackql_deploy/cmd/teardown.py index 44bf769..6aeda1c 100644 --- a/stackql_deploy/cmd/teardown.py +++ b/stackql_deploy/cmd/teardown.py @@ -1,3 +1,4 @@ +import datetime from ..lib.utils import ( catch_error_and_exit, get_type @@ -38,6 +39,9 @@ def collect_exports(self, show_queries, dry_run): ) def run(self, dry_run, show_queries, on_failure): + + start_time = datetime.datetime.now() + self.logger.info( f"tearing down [{self.stack_name}] in [{self.stack_env}] " f"environment {'(dry run)' if dry_run else ''}" @@ -59,6 +63,16 @@ def run(self, dry_run, show_queries, on_failure): # get full context full_context = get_full_context(self.env, self.global_context, resource, self.logger) + # add reverse export map variable to full context + if 'exports' in resource: + for export in resource['exports']: + if isinstance(export, dict): + for key, lookup_key in export.items(): + # Get the value from full_context using the lookup_key + if lookup_key in full_context: + # Add new mapping using the export key and looked up value + full_context[key] = full_context[lookup_key] + # # get resource queries # @@ -156,3 +170,6 @@ def run(self, dry_run, 
show_queries, on_failure): else: if not dry_run: catch_error_and_exit(f"❌ failed to delete {resource['name']}.", self.logger) + + elapsed_time = datetime.datetime.now() - start_time + self.logger.info(f"deployment completed in {elapsed_time}") diff --git a/stackql_deploy/cmd/test.py b/stackql_deploy/cmd/test.py index b146e75..35d4b12 100644 --- a/stackql_deploy/cmd/test.py +++ b/stackql_deploy/cmd/test.py @@ -1,3 +1,4 @@ +import datetime from ..lib.utils import ( catch_error_and_exit, get_type @@ -8,6 +9,9 @@ class StackQLTestRunner(StackQLBase): def run(self, dry_run, show_queries, on_failure): + + start_time = datetime.datetime.now() + self.logger.info( f"testing [{self.stack_name}] in [{self.stack_env}] environment {'(dry run)' if dry_run else ''}" ) @@ -20,6 +24,8 @@ def run(self, dry_run, show_queries, on_failure): self.logger.info(f"exporting variables for [{resource['name']}]") elif type in ('resource', 'multi'): self.logger.info(f"testing resource [{resource['name']}], type: {type}") + elif type == 'command': + continue else: catch_error_and_exit(f"unknown resource type: {type}", self.logger) @@ -71,3 +77,6 @@ def run(self, dry_run, show_queries, on_failure): if type == 'resource' and not dry_run: self.logger.info(f"✅ test passed for {resource['name']}") + + elapsed_time = datetime.datetime.now() - start_time + self.logger.info(f"deployment completed in {elapsed_time}") diff --git a/stackql_deploy/lib/config.py b/stackql_deploy/lib/config.py index 6a80bc2..22f0fab 100644 --- a/stackql_deploy/lib/config.py +++ b/stackql_deploy/lib/config.py @@ -3,6 +3,7 @@ import json import base64 import pprint +import uuid import sys from .utils import pull_providers, catch_error_and_exit from jinja2 import Environment, FileSystemLoader, TemplateError @@ -281,6 +282,7 @@ def setup_environment(stack_dir, logger): env.filters['base64_encode'] = base64_encode env.filters['generate_patch_document'] = generate_patch_document env.filters['from_json'] = from_json + 
env.globals['uuid'] = lambda: str(uuid.uuid4()) logger.debug("custom Jinja filters registered: %s", env.filters.keys()) return env diff --git a/stackql_deploy/lib/utils.py b/stackql_deploy/lib/utils.py index 805fe17..59d041a 100644 --- a/stackql_deploy/lib/utils.py +++ b/stackql_deploy/lib/utils.py @@ -10,7 +10,7 @@ def catch_error_and_exit(errmsg, logger): def get_type(resource, logger): type = resource.get('type', 'resource') - if type not in ['resource', 'query', 'script', 'multi']: + if type not in ['resource', 'query', 'script', 'multi', 'command']: catch_error_and_exit(f"resource type must be 'resource', 'script', 'multi' or 'query', got '{type}'", logger) else: return type @@ -117,7 +117,7 @@ def run_stackql_command(command, logger.debug( f"(utils.run_stackql_command) executing stackql command (attempt {attempt + 1}):\n\n{command}\n" ) - # If qyery is start with 'REGISTRY PULL', check version + # If query is start with 'REGISTRY PULL', check version if command.startswith("REGISTRY PULL"): match = re.match(r'(REGISTRY PULL \w+)(::v[\d\.]+)?', command) if match: