From c39c48311eba80ab014541e2630ceeb81b717193 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Thu, 23 Mar 2023 10:08:47 -0700
Subject: [PATCH 01/16] push to git

---
 macros/drop_schemas.sql | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
 create mode 100644 macros/drop_schemas.sql

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
new file mode 100644
index 0000000..2a56c31
--- /dev/null
+++ b/macros/drop_schemas.sql
@@ -0,0 +1,16 @@
+{% macro drop_schemas(schema_suffixes=[], drop_target_schema=true) %}
+
+{% set schemas_to_drop = [target.schema] if drop_target_schema else [] %}
+
+{% for suffix in schema_suffixes %}
+{% do schemas_to_drop.append(target.schema ~ suffix) %}
+{% endfor %}
+
+{% for s in schemas_to_drop %}
+
+{{ run_query("drop schema if exists `" ~ target.database ~ "`.`" ~ s ~ "` cascade;") }}
+
+{# {{ print('drop schema if exists `' ~ target.database ~ '`.`' ~ s ~ '` cascade;') }} #}
+{% endfor %}
+
+{% endmacro %}
\ No newline at end of file

From 9a62db90314a3ade987990a321330f56483b353c Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Fri, 24 Mar 2023 14:54:08 -0700
Subject: [PATCH 02/16] try this

---
 macros/drop_schemas.sql | 2 +-
 macros/quote_something.sql | 22 ++++++++++++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)
 create mode 100644 macros/quote_something.sql

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 2a56c31..3ad8973 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -8,7 +8,7 @@
 
 {% for s in schemas_to_drop %}
 
-{{ run_query("drop schema if exists `" ~ target.database ~ "`.`" ~ s ~ "` cascade;") }}
+{{ run_query("drop schema if exists " ~ quote(target.database) ~ "." ~ quote(s) ~ " cascade;") }}
 
 {# {{ print('drop schema if exists `' ~ target.database ~ '`.`' ~ s ~ '` cascade;') }} #}
 {% endfor %}
diff --git a/macros/quote_something.sql b/macros/quote_something.sql
new file mode 100644
index 0000000..05fcf30
--- /dev/null
+++ b/macros/quote_something.sql
@@ -0,0 +1,22 @@
+{%- macro quote(thing) -%}
+
+{{ adapter.dispatch('quote', 'fivetran_utils') (thing) }}
+
+{%- endmacro -%}
+
+{%- macro default__quote(thing) -%}
+{# bigquery, spark, databricks #}
+    `{{ thing }}`
+{%- endmacro -%}
+
+{%- macro snowflake__quote(thing) -%}
+    "{{ thing | upper }}"
+{%- endmacro -%}
+
+{%- macro redshift__quote(thing) -%}
+    "{{ thing }}"
+{%- endmacro -%}
+
+{%- macro postgres__quote(thing) -%}
+    "{{ thing }}"
+{%- endmacro -%}
\ No newline at end of file

From 203e09e595e475734f1d024d8ebaa0445fa49755 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Fri, 24 Mar 2023 15:06:17 -0700
Subject: [PATCH 03/16] try it out:

---
 macros/drop_schemas.sql | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 3ad8973..4b5a6a9 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -8,9 +8,9 @@
 
 {% for s in schemas_to_drop %}
 
-{{ run_query("drop schema if exists " ~ quote(target.database) ~ "." ~ quote(s) ~ " cascade;") }}
+{{ run_query("drop schema if exists " ~ quote(target.database) ~ "." ~ s ~ " cascade;") }}
 
-{# {{ print('drop schema if exists `' ~ target.database ~ '`.`' ~ s ~ '` cascade;') }} #}
+{{ print("drop schema if exists " ~ quote(target.database) ~ "." ~ s ~ " cascade;") }}
 {% endfor %}
 
 {% endmacro %}
\ No newline at end of file

From 106dfb6cfd60335328ee4f32d938381231e14550 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Fri, 24 Mar 2023 15:15:27 -0700
Subject: [PATCH 04/16] remove db

---
 macros/drop_schemas.sql | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 4b5a6a9..e612154 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -8,9 +8,8 @@
 
 {% for s in schemas_to_drop %}
 
-{{ run_query("drop schema if exists " ~ quote(target.database) ~ "." ~ s ~ " cascade;") }}
+{{ run_query("drop schema if exists " ~ s ~ " cascade;") }}
 
-{{ print("drop schema if exists " ~ quote(target.database) ~ "." ~ s ~ " cascade;") }}
 {% endfor %}
 
 {% endmacro %}
\ No newline at end of file

From 27fe3adc69acf89f6e6d96ba374d56d19a16cd3f Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Tue, 11 Apr 2023 16:26:08 -0700
Subject: [PATCH 05/16] better version of macro

---
 macros/drop_schemas.sql | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index e612154..193dbfc 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -1,14 +1,23 @@
-{% macro drop_schemas(schema_suffixes=[], drop_target_schema=true) %}
+{% macro drop_schemas(drop_target_schema=true) %}
 
-{% set schemas_to_drop = [target.schema] if drop_target_schema else [] %}
+{% set fetch_list_sql %}
+select schema_name
+from {{ target.database }}.INFORMATION_SCHEMA.SCHEMATA
+where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
+{% endset %}
+
+{% set results = run_query(fetch_list_sql) %}
+
+{% if execute %}
+{% set results_list = results.columns[0].values() %}
+{% else %}
+{% set results_list = [] %}
+{% endif %}
 
-{% for suffix in schema_suffixes %}
-{% do schemas_to_drop.append(target.schema ~ suffix) %}
-{% endfor %}
 
-{% for s in schemas_to_drop %}
+{% for schema_to_drop in results_list %}
 
-{{ run_query("drop schema if exists " ~ s ~ " cascade;") }}
+{{ run_query("drop schema if exists " ~ schema_to_drop ~ " cascade;") }}
 
 {% endfor %}

From e0aeb2821fc9aaed6b905c8d114e0c7415611bbe Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Wed, 12 Apr 2023 14:01:35 -0700
Subject: [PATCH 06/16] try this

---
 macros/drop_schemas.sql | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 193dbfc..3c5480f 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -1,8 +1,10 @@
 {% macro drop_schemas(drop_target_schema=true) %}
 
+{% set wh_quote = '`' if target.type in ('bigquery', 'databricks', 'spark') else '"' %}
+
 {% set fetch_list_sql %}
 select schema_name
-from {{ target.database }}.INFORMATION_SCHEMA.SCHEMATA
+from {{ wh_quote ~ (target.database|upper if target.type == 'snowflake' else target.database) ~ wh_quote }}.INFORMATION_SCHEMA.SCHEMATA
 where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
 {% endset %}

From 344e902ab7bc3009a1fed787ed998228241b8908 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Wed, 12 Apr 2023 14:53:03 -0700
Subject: [PATCH 07/16] databricks

---
 macros/drop_schemas.sql | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 3c5480f..fd9a059 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -1,11 +1,19 @@
 {% macro drop_schemas(drop_target_schema=true) %}
 
-{% set wh_quote = '`' if target.type in ('bigquery', 'databricks', 'spark') else '"' %}
+{% set wh_quote = '`' if target.type in ('bigquery') else '"' %}
 
 {% set fetch_list_sql %}
+
+{% if target.type not in ('databricks', 'spark') %}
 select schema_name
-from {{ wh_quote ~ (target.database|upper if target.type == 'snowflake' else target.database) ~ wh_quote }}.INFORMATION_SCHEMA.SCHEMATA
+from
+{{ wh_quote ~ (target.database|upper if target.type == 'snowflake' else target.database) ~ wh_quote }}.INFORMATION_SCHEMA.SCHEMATA
 where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
+
+{% else %}
+SHOW SCHEMAS LIKE '{{ target.schema }}{%- if not drop_target_schema -%}_{%- endif -%}*'
+
+{% endif %}
 {% endset %}
@@ -16,7 +24,6 @@ where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target
 {% set results_list = [] %}
 {% endif %}
-
 {% for schema_to_drop in results_list %}
 
 {{ run_query("drop schema if exists " ~ schema_to_drop ~ " cascade;") }}

From 9501bb1aaf94f7f7b7710bffea3e8edfd05f3695 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Wed, 12 Apr 2023 16:17:22 -0700
Subject: [PATCH 08/16] working

---
 macros/drop_schemas.sql | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index fd9a059..696837c 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -1,13 +1,11 @@
 {% macro drop_schemas(drop_target_schema=true) %}
 
-{% set wh_quote = '`' if target.type in ('bigquery') else '"' %}
-
 {% set fetch_list_sql %}
 
 {% if target.type not in ('databricks', 'spark') %}
 select schema_name
 from
-{{ wh_quote ~ (target.database|upper if target.type == 'snowflake' else target.database) ~ wh_quote }}.INFORMATION_SCHEMA.SCHEMATA
+{{ quote(target.database) }}.INFORMATION_SCHEMA.SCHEMATA
 where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
 
 {% else %}

From da4fff8d325c6a6ef78c047d8595d608f4e2b2d5 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Thu, 13 Apr 2023 10:48:23 -0700
Subject: [PATCH 09/16] readme

---
 README.md | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/README.md b/README.md
index 0184023..bfdefa1 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,7 @@ dispatch:
   - [max\_bool (source)](#max_bool-source)
   - [percentile (source)](#percentile-source)
   - [pivot\_json\_extract (source)](#pivot_json_extract-source)
+  - [quote (source)](#quote-source)
   - [string\_agg (source)](#string_agg-source)
   - [timestamp\_add (source)](#timestamp_add-source)
   - [timestamp\_diff (source)](#timestamp_diff-source)
@@ -72,6 +73,7 @@ dispatch:
   - [add\_dbt\_source\_relation (source)](#add_dbt_source_relation-source)
   - [add\_pass\_through\_columns (source)](#add_pass_through_columns-source)
   - [calculated\_fields (source)](#calculated_fields-source)
+  - [drop\_schemas (source)](#drop_schemas-source)
   - [dummy\_coalesce\_value (source)](#dummy_coalesce_value-source)
   - [fill\_pass\_through\_columns (source)](#fill_pass_through_columns-source)
   - [fill\_staging\_columns (source)](#fill_staging_columns-source)
@@ -249,6 +251,16 @@ This macro builds off of the `json_extract` macro in order to extract a list of
 * `string` (required): Name of the field which contains the json object.
 * `list_of_properties` (required): List of the fields that you want to extract from the json object and pivot out into columns.
 
+----
+### quote ([source](macros/quote_something.sql))
+This macro takes an object returns it wrapped in database-appropriate quotes (and casing).
+
+**Usage:**
+```sql
+{{ fivetran_utils.quote(thing="reserved_keyword_mayhaps") }}
+```
+**Args:**
+* `thing` (required): SQL object you want to quote.
 ----
 ### string_agg ([source](macros/string_agg.sql))
 This macro allows for cross database field aggregation and delimiter customization. Supported database specific field aggregation functions include
@@ -344,6 +356,19 @@ vars:
 * `variable` (required): The variable containing the calculated field `name` and `transform_sql`.
 
 ----
+
+### drop_schemas ([source](macros/drop_schemas.sql))
+This macro was created to be clean up the schemas in our integration test environments. It drops schemas that are `like` the `target.schema`. By default it will drop the target schema as well but this can be configured.
+
+**Usage:**
+```zsh
+dbt run-operation fivetran_utils.drop_schemas
+```
+**Args:**
+* `drop_target_schema` (optional): Boolean that is `true` by default. If `false`, the target schema will not be dropped.
+
+----
+
 ### dummy_coalesce_value ([source](macros/dummy_coalesce_value.sql))
 This macro creates a dummy coalesce value based on the data type of the field. See below for the respective data type and dummy values:
 - String = 'DUMMY_STRING'

From bdfa76c5870487e44a8ffb0ad8db941776a714ae Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Mon, 17 Apr 2023 14:11:38 -0700
Subject: [PATCH 10/16] sheri and joe feedback

---
 README.md | 18 ++++++++++++++++--
 macros/drop_schemas.sql | 6 ++++--
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index bfdefa1..4df535d 100644
--- a/README.md
+++ b/README.md
@@ -361,8 +361,22 @@ vars:
 This macro was created to be clean up the schemas in our integration test environments. It drops schemas that are `like` the `target.schema`. By default it will drop the target schema as well but this can be configured.
 
 **Usage:**
-```zsh
-dbt run-operation fivetran_utils.drop_schemas
+At the end of a Buildkite integration test job in `.buildkite/scripts/run_models.sh`:
+```sh
+# do all the setup, dbt seed, compile, run, test steps beforehand...
+dbt run-operation fivetran_utils.drop_schemas --target "$db"
+```
+
+As a Fivetran Transformation job step in a `deployment.yml`:
+```yml
+jobs:
+  - name: cleanup
+    schedule: '0 0 * * 0' # The example will run once a week at 00:00 on Sunday.
+    steps:
+      - name: drop schemas but leave target
+        command: dbt run-operation fivetran_utils.drop_schemas --target "$db" --vars '{"drop_target_schema": False}'
+      - name: drop schemas including target
+        command: dbt run-operation fivetran_utils.drop_schemas --target "$db"
 ```
 **Args:**
 * `drop_target_schema` (optional): Boolean that is `true` by default. If `false`, the target schema will not be dropped.
diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
index 696837c..ad990c1 100644
--- a/macros/drop_schemas.sql
+++ b/macros/drop_schemas.sql
@@ -1,4 +1,4 @@
-{% macro drop_schemas(drop_target_schema=true) %}
+{% macro drop_schemas(drop_target_schema=true, schema_patterns=[target.schema]) %}
 
 {% set fetch_list_sql %}
 
@@ -24,7 +24,9 @@ SHOW SCHEMAS LIKE '{{ target.schema }}{%- if not drop_target_schema -%}_{%- endi
 
 {% for schema_to_drop in results_list %}
 
-{{ run_query("drop schema if exists " ~ schema_to_drop ~ " cascade;") }}
+{# {{ run_query("drop schema if exists " ~ schema_to_drop ~ " cascade;") }} #}
+
+{% do adapter.drop_schema(api.Relation.create(database=target.database, schema=schema_to_drop)) %}
 
 {% endfor %}

From 638dacc7f031a70e41d9aad3c95e0a4659cfe86f Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Mon, 17 Apr 2023 14:23:01 -0700
Subject: [PATCH 11/16] joe sheri feedback

---
 README.md | 8 ++++----
 macros/drop_schemas.sql | 33 ------------------------------
 macros/drop_schemas_automation.sql | 30 +++++++++++++++++++++++++++
 3 files changed, 34 insertions(+), 37 deletions(-)
 delete mode 100644 macros/drop_schemas.sql
 create mode 100644 macros/drop_schemas_automation.sql

diff --git a/README.md b/README.md
index 4df535d..0d885aa 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@ dispatch:
   - [add\_dbt\_source\_relation (source)](#add_dbt_source_relation-source)
   - [add\_pass\_through\_columns (source)](#add_pass_through_columns-source)
   - [calculated\_fields (source)](#calculated_fields-source)
-  - [drop\_schemas (source)](#drop_schemas-source)
+  - [drop\_schemas\_automation (source)](#drop_schemas_automation-source)
   - [dummy\_coalesce\_value (source)](#dummy_coalesce_value-source)
   - [fill\_pass\_through\_columns (source)](#fill_pass_through_columns-source)
   - [fill\_staging\_columns (source)](#fill_staging_columns-source)
@@ -357,7 +357,7 @@ vars:
 ----
 
-### drop_schemas ([source](macros/drop_schemas.sql))
+### drop_schemas_automation ([source](macros/drop_schemas_automation.sql))
 This macro was created to be clean up the schemas in our integration test environments. It drops schemas that are `like` the `target.schema`. By default it will drop the target schema as well but this can be configured.
 
 **Usage:**
 At the end of a Buildkite integration test job in `.buildkite/scripts/run_models.sh`:
 ```sh
 # do all the setup, dbt seed, compile, run, test steps beforehand...
-dbt run-operation fivetran_utils.drop_schemas --target "$db"
+dbt run-operation fivetran_utils.drop_schemas_automation --target "$db"
 ```
 
 As a Fivetran Transformation job step in a `deployment.yml`:
 ```yml
 jobs:
   - name: cleanup
     schedule: '0 0 * * 0' # The example will run once a week at 00:00 on Sunday.
     steps:
       - name: drop schemas but leave target
-        command: dbt run-operation fivetran_utils.drop_schemas --target "$db" --vars '{"drop_target_schema": False}'
+        command: dbt run-operation fivetran_utils.drop_schemas --vars '{"drop_target_schema": False}'
       - name: drop schemas including target
-        command: dbt run-operation fivetran_utils.drop_schemas --target "$db"
+        command: dbt run-operation fivetran_utils.drop_schemas_automation
 ```
 **Args:**
 * `drop_target_schema` (optional): Boolean that is `true` by default. If `false`, the target schema will not be dropped.
diff --git a/macros/drop_schemas.sql b/macros/drop_schemas.sql
deleted file mode 100644
index ad990c1..0000000
--- a/macros/drop_schemas.sql
+++ /dev/null
@@ -1,33 +0,0 @@
-{% macro drop_schemas(drop_target_schema=true, schema_patterns=[target.schema]) %}
-
-{% set fetch_list_sql %}
-
-{% if target.type not in ('databricks', 'spark') %}
-select schema_name
-from
-{{ quote(target.database) }}.INFORMATION_SCHEMA.SCHEMATA
-where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
-
-{% else %}
-SHOW SCHEMAS LIKE '{{ target.schema }}{%- if not drop_target_schema -%}_{%- endif -%}*'
-
-{% endif %}
-{% endset %}
-
-{% set results = run_query(fetch_list_sql) %}
-
-{% if execute %}
-{% set results_list = results.columns[0].values() %}
-{% else %}
-{% set results_list = [] %}
-{% endif %}
-
-{% for schema_to_drop in results_list %}
-
-{# {{ run_query("drop schema if exists " ~ schema_to_drop ~ " cascade;") }} #}
-
-{% do adapter.drop_schema(api.Relation.create(database=target.database, schema=schema_to_drop)) %}
-
-{% endfor %}
-
-{% endmacro %}
\ No newline at end of file
diff --git a/macros/drop_schemas_automation.sql b/macros/drop_schemas_automation.sql
new file mode 100644
index 0000000..bb8c543
--- /dev/null
+++ b/macros/drop_schemas_automation.sql
@@ -0,0 +1,30 @@
+{% macro drop_schemas_automation(drop_target_schema=true) %}
+    {{ return(adapter.dispatch('drop_schemas_automation')(drop_target_schema)) }}
+{%- endmacro %}
+
+{% macro default__drop_schemas_automation(drop_target_schema=true) %}
+
+{% set fetch_list_sql %}
+    {% if target.type not in ('databricks', 'spark') %}
+    select schema_name
+    from
+    {{ quote(target.database) }}.INFORMATION_SCHEMA.SCHEMATA
+    where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
+    {% else %}
+    SHOW SCHEMAS LIKE '{{ target.schema }}{%- if not drop_target_schema -%}_{%- endif -%}*'
+    {% endif %}
+{% endset %}
+
+{% set results = run_query(fetch_list_sql) %}
+
+{% if execute %}
+    {% set results_list = results.columns[0].values() %}
+{% else %}
+    {% set results_list = [] %}
+{% endif %}
+
+{% for schema_to_drop in results_list %}
+    {% do adapter.drop_schema(api.Relation.create(database=target.database, schema=schema_to_drop)) %}
+{% endfor %}
+
+{% endmacro %}
\ No newline at end of file

From 82940d4f3d0e550af275cb997e45a7c352bffaff Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Mon, 17 Apr 2023 14:29:23 -0700
Subject: [PATCH 12/16] polish

---
 CHANGELOG.md | 8 +++++++-
 README.md | 6 +++---
 dbt_project.yml | 2 +-
 integration_tests/dbt_project.yml | 2 +-
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04edceb..f99d9ef 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
-# dbt_fivetran_utils v0.4.3
+# dbt_fivetran_utils v0.4.4
+
+## Feature Updates
+[PR #106](https://github.com/fivetran/dbt_fivetran_utils/pull/106) introduces the following two new macros:
+- [drop_schemas_automation](https://github.com/fivetran/dbt_fivetran_utils/tree/explore/drop-integration-test-schemas#drop_schemas_automation-source)
+- [quote](https://github.com/fivetran/dbt_fivetran_utils/tree/explore/drop-integration-test-schemas#quote-source)
+# dbt_fivetran_utils v0.4.3
 
 ## Feature Updates
 - ([PR #100](https://github.com/fivetran/dbt_fivetran_utils/pull/100)) Expanded the `union_data` macro to create an empty table if none of the provided schemas or databases contain a source table. If the source table does not exist anywhere, `union_data` will return a **completely** empty table (ie `limit 0`) with just one string column (`_dbt_source_relation`) and raise a compiler warning message that the respective staging model is empty.
diff --git a/README.md b/README.md
index 4df535d..5fe6941 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ This package includes macros that are used across Fivetran's dbt packages. This
 # 🎯 How do I use the dbt package?
 ## Step 1: Installing the Package
-Include the following fivetran_utils package version in your `packages.yml`. Please note that this package is installed by default within **all** Fivetran dbt packages.
+Include the following fivetran_utils package version in your `packages.yml` if you do not have any other Fivetran dbt package dependencies. Please note that this package is installed by default within **all** Fivetran dbt packages.
 > Check [dbt Hub](https://hub.getdbt.com/) for the latest installation instructions, or [read the dbt docs](https://docs.getdbt.com/docs/package-management) for more information on installing packages.
 ```yaml
 packages:
@@ -364,7 +364,7 @@ jobs:
     schedule: '0 0 * * 0' # The example will run once a week at 00:00 on Sunday.
     steps:
       - name: drop schemas but leave target
-        command: dbt run-operation fivetran_utils.drop_schemas --vars '{"drop_target_schema": False}'
+        command: dbt run-operation fivetran_utils.drop_schemas_automation --vars '{"drop_target_schema": False}'
       - name: drop schemas including target
         command: dbt run-operation fivetran_utils.drop_schemas_automation
 ```
 **Args:**
 * `drop_target_schema` (optional): Boolean that is `true` by default. If `false`, the target schema will not be dropped.
diff --git a/dbt_project.yml b/dbt_project.yml
index 6113a4f..b58d84f 100644
--- a/dbt_project.yml
+++ b/dbt_project.yml
@@ -1,4 +1,4 @@
 name: 'fivetran_utils'
-version: '0.4.3'
+version: '0.4.4'
 config-version: 2
 require-dbt-version: [">=1.3.0", "<2.0.0"]
diff --git a/integration_tests/dbt_project.yml b/integration_tests/dbt_project.yml
index 1cf407d..35556be 100644
--- a/integration_tests/dbt_project.yml
+++ b/integration_tests/dbt_project.yml
@@ -1,5 +1,5 @@
 name: 'fivetran_utils_integration_tests'
-version: '0.4.3'
+version: '0.4.4'
 config-version: 2
 profile: 'integration_tests'

From 6554d6148dfe2d88b7b484e7f79a42936f23e076 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Tue, 18 Apr 2023 11:59:46 -0700
Subject: [PATCH 13/16] Update README.md

Co-authored-by: Joe Markiewicz <74217849+fivetran-joemarkiewicz@users.noreply.github.com>
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5fe6941..b1b2a41 100644
--- a/README.md
+++ b/README.md
@@ -358,7 +358,7 @@ vars:
 ----
 ### drop_schemas_automation ([source](macros/drop_schemas_automation.sql))
-This macro was created to be clean up the schemas in our integration test environments. It drops schemas that are `like` the `target.schema`. By default it will drop the target schema as well but this can be configured.
+This macro was created to help clean up the schemas in our integration test environments. It drops schemas that are `like` the `target.schema`. By default it will drop the target schema as well but this can be configured.
 
 **Usage:**
 At the end of a Buildkite integration test job in `.buildkite/scripts/run_models.sh`:

From 3ca2a3653ccab2e4945f509dad7a8ff9b8d6c2de Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Tue, 18 Apr 2023 12:26:03 -0700
Subject: [PATCH 14/16] joe review

---
 README.md | 2 +-
 macros/drop_schemas_automation.sql | 3 ++-
 macros/quote_something.sql | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 5fe6941..299cbef 100644
--- a/README.md
+++ b/README.md
@@ -253,7 +253,7 @@
 ----
 ### quote ([source](macros/quote_something.sql))
-This macro takes an object returns it wrapped in database-appropriate quotes (and casing).
+This macro takes a SQL object (ie database, schema, column) and returns it wrapped in database-appropriate quotes (and casing for Snowflake).
 
 **Usage:**
 ```sql
diff --git a/macros/drop_schemas_automation.sql b/macros/drop_schemas_automation.sql
index bb8c543..5dd85ca 100644
--- a/macros/drop_schemas_automation.sql
+++ b/macros/drop_schemas_automation.sql
@@ -1,5 +1,5 @@
 {% macro drop_schemas_automation(drop_target_schema=true) %}
-    {{ return(adapter.dispatch('drop_schemas_automation')(drop_target_schema)) }}
+    {{ return(adapter.dispatch('drop_schemas_automation', 'fivetran_utils')(drop_target_schema)) }}
 {%- endmacro %}
 
 {% macro default__drop_schemas_automation(drop_target_schema=true) %}
@@ -25,6 +25,7 @@
 
 {% for schema_to_drop in results_list %}
     {% do adapter.drop_schema(api.Relation.create(database=target.database, schema=schema_to_drop)) %}
+    {{ print('Schema ' ~ schema_to_drop ~ ' successfully dropped from the ' ~ target.database ~ ' database.\n')}}
 {% endfor %}
 
 {% endmacro %}
\ No newline at end of file
diff --git a/macros/quote_something.sql b/macros/quote_something.sql
index 05fcf30..99d1287 100644
--- a/macros/quote_something.sql
+++ b/macros/quote_something.sql
@@ -1,6 +1,6 @@
 {%- macro quote(thing) -%}
 
-{{ adapter.dispatch('quote', 'fivetran_utils') (thing) }}
+{{ return(adapter.dispatch('quote', 'fivetran_utils')(thing)) }}
 
 {%- endmacro -%}

From a0937e65314051fe8b25087fb03946c7d08ede76 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Tue, 18 Apr 2023 12:30:38 -0700
Subject: [PATCH 15/16] update quote name

---
 CHANGELOG.md | 2 +-
 README.md | 21 ++++++++++-----------
 macros/drop_schemas_automation.sql | 2 +-
 macros/quote_something.sql | 22 ----------------------
 macros/wrap_in_quotes.sql | 22 ++++++++++++++++++++++
 5 files changed, 34 insertions(+), 35 deletions(-)
 delete mode 100644 macros/quote_something.sql
 create mode 100644 macros/wrap_in_quotes.sql

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f99d9ef..c24a481 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,7 +3,7 @@
 ## Feature Updates
 [PR #106](https://github.com/fivetran/dbt_fivetran_utils/pull/106) introduces the following two new macros:
 - [drop_schemas_automation](https://github.com/fivetran/dbt_fivetran_utils/tree/explore/drop-integration-test-schemas#drop_schemas_automation-source)
-- [quote](https://github.com/fivetran/dbt_fivetran_utils/tree/explore/drop-integration-test-schemas#quote-source)
+- [wrap_in_quotes](https://github.com/fivetran/dbt_fivetran_utils/tree/explore/drop-integration-test-schemas#wrap_in_quotes-source)
 
 # dbt_fivetran_utils v0.4.3
diff --git a/README.md b/README.md
index 299cbef..c1f92ea 100644
--- a/README.md
+++ b/README.md
@@ -64,11 +64,11 @@ dispatch:
   - [max\_bool (source)](#max_bool-source)
   - [percentile (source)](#percentile-source)
   - [pivot\_json\_extract (source)](#pivot_json_extract-source)
-  - [quote (source)](#quote-source)
   - [string\_agg (source)](#string_agg-source)
   - [timestamp\_add (source)](#timestamp_add-source)
   - [timestamp\_diff (source)](#timestamp_diff-source)
   - [try\_cast (source)](#try_cast-source)
+  - [wrap\_in\_quotes (source)](#wrap_in_quotes-source)
 - [SQL and field generators](#sql-and-field-generators)
   - [add\_dbt\_source\_relation (source)](#add_dbt_source_relation-source)
   - [add\_pass\_through\_columns (source)](#add_pass_through_columns-source)
   - [calculated\_fields (source)](#calculated_fields-source)
   - [drop\_schemas\_automation (source)](#drop_schemas_automation-source)
   - [dummy\_coalesce\_value (source)](#dummy_coalesce_value-source)
   - [fill\_pass\_through\_columns (source)](#fill_pass_through_columns-source)
   - [fill\_staging\_columns (source)](#fill_staging_columns-source)
@@ -251,16 +251,6 @@ This macro builds off of the `json_extract` macro in order to extract a list of
 * `string` (required): Name of the field which contains the json object.
 * `list_of_properties` (required): List of the fields that you want to extract from the json object and pivot out into columns.
 
-----
-### quote ([source](macros/quote_something.sql))
-This macro takes a SQL object (ie database, schema, column) and returns it wrapped in database-appropriate quotes (and casing for Snowflake).
-
-**Usage:**
-```sql
-{{ fivetran_utils.quote(thing="reserved_keyword_mayhaps") }}
-```
-**Args:**
-* `thing` (required): SQL object you want to quote.
 ----
 ### string_agg ([source](macros/string_agg.sql))
 This macro allows for cross database field aggregation and delimiter customization. Supported database specific field aggregation functions include
@@ -302,7 +302,16 @@ This macro allows a field to be cast to a specified datatype. If the datatype is
 * `type` (required): The datatype you want to try and cast the base field.
 
 ----
+### wrap_in_quotes ([source](macros/wrap_in_quotes.sql))
+This macro takes a SQL object (ie database, schema, column) and returns it wrapped in database-appropriate quotes (and casing for Snowflake).
+**Usage:**
+```sql
+{{ fivetran_utils.wrap_in_quotes(thing="reserved_keyword_mayhaps") }}
+```
+**Args:**
+* `thing` (required): SQL object you want to quote.
+----
 ## SQL and field generators
 These macros create SQL or fields to be included when running the package.
 ### add_dbt_source_relation ([source](macros/add_dbt_source_relation.sql))
diff --git a/macros/drop_schemas_automation.sql b/macros/drop_schemas_automation.sql
index 5dd85ca..75cff7a 100644
--- a/macros/drop_schemas_automation.sql
+++ b/macros/drop_schemas_automation.sql
@@ -8,7 +8,7 @@
     {% if target.type not in ('databricks', 'spark') %}
     select schema_name
     from
-    {{ quote(target.database) }}.INFORMATION_SCHEMA.SCHEMATA
+    {{ wrap_in_quotes(target.database) }}.INFORMATION_SCHEMA.SCHEMATA
     where lower(schema_name) like '{{ target.schema | lower }}{%- if not drop_target_schema -%}_{%- endif -%}%'
    {% else %}
    SHOW SCHEMAS LIKE '{{ target.schema }}{%- if not drop_target_schema -%}_{%- endif -%}*'
diff --git a/macros/quote_something.sql b/macros/quote_something.sql
deleted file mode 100644
index 99d1287..0000000
--- a/macros/quote_something.sql
+++ /dev/null
@@ -1,22 +0,0 @@
-{%- macro quote(thing) -%}
-
-{{ return(adapter.dispatch('quote', 'fivetran_utils')(thing)) }}
-
-{%- endmacro -%}
-
-{%- macro default__quote(thing) -%}
-{# bigquery, spark, databricks #}
-    `{{ thing }}`
-{%- endmacro -%}
-
-{%- macro snowflake__quote(thing) -%}
-    "{{ thing | upper }}"
-{%- endmacro -%}
-
-{%- macro redshift__quote(thing) -%}
-    "{{ thing }}"
-{%- endmacro -%}
-
-{%- macro postgres__quote(thing) -%}
-    "{{ thing }}"
-{%- endmacro -%}
\ No newline at end of file
diff --git a/macros/wrap_in_quotes.sql b/macros/wrap_in_quotes.sql
new file mode 100644
index 0000000..f514952
--- /dev/null
+++ b/macros/wrap_in_quotes.sql
@@ -0,0 +1,22 @@
+{%- macro wrap_in_quotes(thing) -%}
+
+{{ return(adapter.dispatch('wrap_in_quotes', 'fivetran_utils')(thing)) }}
+
+{%- endmacro -%}
+
+{%- macro default__wrap_in_quotes(thing) -%}
+{# bigquery, spark, databricks #}
+    `{{ thing }}`
+{%- endmacro -%}
+
+{%- macro snowflake__wrap_in_quotes(thing) -%}
+    "{{ thing | upper }}"
+{%- endmacro -%}
+
+{%- macro redshift__wrap_in_quotes(thing) -%}
+    "{{ thing }}"
+{%- endmacro -%}
+
+{%- macro postgres__wrap_in_quotes(thing) -%}
+    "{{ thing }}"
+{%- endmacro -%}
\ No newline at end of file

From 20e3de02e0c3f7dc5985bbe98e7b037ba0633c01 Mon Sep 17 00:00:00 2001
From: Jamie Rodriguez <65564846+fivetran-jamie@users.noreply.github.com>
Date: Tue, 18 Apr 2023 12:44:47 -0700
Subject: [PATCH 16/16] updated thing argument

---
 README.md | 4 ++--
 macros/wrap_in_quotes.sql | 20 ++++++++++----------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index c1f92ea..3a46576 100644
--- a/README.md
+++ b/README.md
@@ -307,10 +307,10 @@ This macro takes a SQL object (ie database, schema, column) and returns it wrapp
 **Usage:**
 ```sql
-{{ fivetran_utils.wrap_in_quotes(thing="reserved_keyword_mayhaps") }}
+{{ fivetran_utils.wrap_in_quotes(object_to_quote="reserved_keyword_mayhaps") }}
 ```
 **Args:**
-* `thing` (required): SQL object you want to quote.
+* `object_to_quote` (required): SQL object you want to quote.
 ----
 ## SQL and field generators
 These macros create SQL or fields to be included when running the package.
diff --git a/macros/wrap_in_quotes.sql b/macros/wrap_in_quotes.sql
index f514952..acb0a4f 100644
--- a/macros/wrap_in_quotes.sql
+++ b/macros/wrap_in_quotes.sql
@@ -1,22 +1,22 @@
-{%- macro wrap_in_quotes(thing) -%}
+{%- macro wrap_in_quotes(object_to_quote) -%}
 
-{{ return(adapter.dispatch('wrap_in_quotes', 'fivetran_utils')(thing)) }}
+{{ return(adapter.dispatch('wrap_in_quotes', 'fivetran_utils')(object_to_quote)) }}
 
 {%- endmacro -%}
 
-{%- macro default__wrap_in_quotes(thing) -%}
+{%- macro default__wrap_in_quotes(object_to_quote) -%}
 {# bigquery, spark, databricks #}
-    `{{ thing }}`
+    `{{ object_to_quote }}`
 {%- endmacro -%}
 
-{%- macro snowflake__wrap_in_quotes(thing) -%}
-    "{{ thing | upper }}"
+{%- macro snowflake__wrap_in_quotes(object_to_quote) -%}
+    "{{ object_to_quote | upper }}"
 {%- endmacro -%}
 
-{%- macro redshift__wrap_in_quotes(thing) -%}
-    "{{ thing }}"
+{%- macro redshift__wrap_in_quotes(object_to_quote) -%}
+    "{{ object_to_quote }}"
 {%- endmacro -%}
 
-{%- macro postgres__wrap_in_quotes(thing) -%}
-    "{{ thing }}"
+{%- macro postgres__wrap_in_quotes(object_to_quote) -%}
+    "{{ object_to_quote }}"
 {%- endmacro -%}
\ No newline at end of file
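
For reference, a minimal sketch of what the two macros introduced in this series do at runtime once the final commit is applied. The `ci` database and `fivetran_utils_tests` schema below are hypothetical example values, not part of the patches; the quoting behavior and the schema-listing query simply mirror `macros/wrap_in_quotes.sql` and `macros/drop_schemas_automation.sql` as they stand after PATCH 16/16.

```sql
-- Hypothetical Snowflake target: target.database = 'ci', target.schema = 'fivetran_utils_tests'.

-- {{ fivetran_utils.wrap_in_quotes(object_to_quote=target.database) }} renders as "CI" on Snowflake
-- (upper-cased and double-quoted), as `ci` on BigQuery/Databricks/Spark, and as "ci" on Redshift/Postgres.

-- On warehouses other than Databricks/Spark, drop_schemas_automation first lists candidate schemas:
select schema_name
from "CI".INFORMATION_SCHEMA.SCHEMATA
where lower(schema_name) like 'fivetran_utils_tests%'  -- 'fivetran_utils_tests_%' when drop_target_schema=false

-- and then drops each result via adapter.drop_schema(api.Relation.create(...)),
-- printing a confirmation line per schema, rather than issuing raw `drop schema ... cascade` statements.
```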