diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/airflow.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/airflow.md
index 5a75e741a646..fb70536a0e58 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/airflow.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/airflow.md
@@ -30,7 +30,7 @@ To deploy OpenMetadata, check the Deployment guides.
 To run the Ingestion via the UI you'll need to use the OpenMetadata Ingestion Container, which comes shipped with
 custom Airflow plugins to handle the workflow deployment.

-**Note:** For metadata ingestion, kindly make sure add alteast `dashboard` scopes to the clientId provided.
+**Note:** For metadata ingestion, kindly make sure to add at least the `dashboard` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ### Python Requirements
diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/cli.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/cli.md
index 46f0f3e94801..da702b3ba0a8 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/cli.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/cli.md
@@ -30,7 +30,7 @@ To deploy OpenMetadata, check the Deployment guides.
 To run the Ingestion via the UI you'll need to use the OpenMetadata Ingestion Container, which comes shipped with
 custom Airflow plugins to handle the workflow deployment.

-**Note:** For metadata ingestion, kindly make sure add alteast `dashboard` scopes to the clientId provided.
+**Note:** For metadata ingestion, kindly make sure to add at least the `dashboard` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ### Python Requirements
diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/index.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/index.md
index 746a6fcc7144..ac52a75b24dd 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/domo-dashboard/index.md
@@ -48,7 +48,7 @@ To run the Ingestion via the UI you'll need to use the OpenMetadata Ingestion Co
 custom Airflow plugins to handle the workflow deployment.

 {% note noteType="Warning" %}
-For metadata ingestion, kindly make sure add alteast `dashboard` scopes to the clientId provided.
+For metadata ingestion, kindly make sure to add at least the `dashboard` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).
 {% /note %}

diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/airflow.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/airflow.md
index 4ab1e55ec184..fdd4e059dcd8 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/airflow.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/airflow.md
@@ -36,7 +36,7 @@ The ingestion also works with Superset 2.0.0 🎉

 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions.
-**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ### Python Requirements diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/cli.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/cli.md index 65c4e0937cfe..ad79a3c6a1e2 100644 --- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/cli.md +++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/cli.md @@ -36,7 +36,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ### Python Requirements diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/index.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/index.md index 6bf321199885..c132ea432f7e 100644 --- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/index.md +++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/superset/index.md @@ -54,7 +54,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ## Metadata Ingestion diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/airflow.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/airflow.md index 565506b5db23..ca510b85e1fa 100644 --- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/airflow.md +++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/airflow.md @@ -23,7 +23,7 @@ Configure and schedule Tableau metadata and profiler workflows from the OpenMeta ## Requirements -To ingest tableau metadata, minimum `Site Role: Viewer` is requried for the tableau user. +To ingest tableau metadata, minimum `Site Role: Viewer` is required for the tableau user. {%inlineCallout icon="description" bold="OpenMetadata 0.12 or later" href="/deployment"%} To deploy OpenMetadata, check the Deployment guides. 
diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/cli.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/cli.md
index b106b8ecbbff..26d40d4285b0 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/cli.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/cli.md
@@ -23,7 +23,7 @@ Configure and schedule Tableau metadata and profiler workflows from the OpenMeta

 ## Requirements

-To ingest tableau metadata, minimum `Site Role: Viewer` is requried for the tableau user.
+To ingest tableau metadata, minimum `Site Role: Viewer` is required for the tableau user.

 {%inlineCallout icon="description" bold="OpenMetadata 0.12 or later" href="/deployment"%}
 To deploy OpenMetadata, check the Deployment guides.
diff --git a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/index.md b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/index.md
index ef96a0fb6572..1062f47412e0 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/dashboard/tableau/index.md
@@ -41,7 +41,7 @@ the following docs to connect using Airflow SDK or with the CLI.

 ## Requirements

-To ingest tableau metadata, minimum `Site Role: Viewer` is requried for the tableau user.
+To ingest tableau metadata, minimum `Site Role: Viewer` is required for the tableau user.

 {%inlineCallout icon="description" bold="OpenMetadata 0.12 or later" href="/deployment"%}
 To deploy OpenMetadata, check the Deployment guides.
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/bigquery/roles.md b/openmetadata-docs/content/v1.0.x/connectors/database/bigquery/roles.md
index c1928dcffca4..d8647952c6a7 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/bigquery/roles.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/bigquery/roles.md
@@ -51,10 +51,12 @@ You can search for the required permissions in the filter box and add them accor
 | 5 | resourcemanager.projects.get | Metadata Ingestion |
 | 6 | bigquery.jobs.create | Metadata Ingestion |
 | 7 | bigquery.jobs.listAll | Metadata Ingestion |
-| 8 | datacatalog.taxonomies.get | Fetch Policy Tags |
-| 9 | datacatalog.taxonomies.list | Fetch Policy Tags |
-| 10 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow |
-| 11 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow |
+| 8 | bigquery.routines.get | Stored Procedure |
+| 9 | bigquery.routines.list | Stored Procedure |
+| 10 | datacatalog.taxonomies.get | Fetch Policy Tags |
+| 11 | datacatalog.taxonomies.list | Fetch Policy Tags |
+| 12 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow |
+| 13 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow |

 {% image
 src="/images/v1.0/connectors/bigquery/create-role-4.png"
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/airflow.md b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/airflow.md
index f278bf77cc69..d0686116edc5 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/airflow.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/airflow.md
@@ -46,7 +46,7 @@ custom Airflow plugins to handle the workflow deployment.

 **Note:**

-For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ### Python Requirements
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/cli.md b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/cli.md
index 3dcd032068a2..af426ae70047 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/cli.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/cli.md
@@ -46,7 +46,7 @@ custom Airflow plugins to handle the workflow deployment.

 **Note:**

-For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.

 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).
@@ -62,7 +62,7 @@ pip3 install "openmetadata-ingestion[domo]"

 All connectors are defined as JSON Schemas.
 [Here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/athenaConnection.json)
-you can find the structure to create a connection to DomoDatbase.
+you can find the structure to create a connection to DomoDatabase.

 In order to create and run a Metadata Ingestion workflow, we will follow
 the steps to create a YAML configuration able to connect to the source,
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/index.md b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/index.md
index eaed8d7d52d7..36d0c7356848 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/domo-database/index.md
@@ -65,7 +65,7 @@ custom Airflow plugins to handle the workflow deployment.

 **Note:**

-For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ## Metadata Ingestion
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/mysql/index.md b/openmetadata-docs/content/v1.0.x/connectors/database/mysql/index.md
index 4af5800b4ef1..09f97a8a94e3 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/mysql/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/mysql/index.md
@@ -67,7 +67,7 @@ custom Airflow plugins to handle the workflow deployment.

 Note that We support MySQL (version 8.0.0 or greater) and the user should have access to the `INFORMATION_SCHEMA` table. By default a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges.

 ```SQL
--- Create user. If is ommited, defaults to '%'
+-- Create user.
If is omitted, defaults to '%'
 -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html
 CREATE USER ''[@''] IDENTIFIED BY '';
diff --git a/openmetadata-docs/content/v1.0.x/connectors/database/redshift/troubleshooting.md b/openmetadata-docs/content/v1.0.x/connectors/database/redshift/troubleshooting.md
index c4f0ccb043de..3906085f1e15 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/database/redshift/troubleshooting.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/database/redshift/troubleshooting.md
@@ -13,7 +13,7 @@ connection to server at \":\" (@IP), does not match host name \":\"

 ```

-If you get this error that time plese pass `{'sslmode': 'verify-ca'}` in the connection arguments.
+If you get this error, please pass `{'sslmode': 'verify-ca'}` in the connection arguments.

 {% image
 src="/images/v1.0/connectors/redshift/service-connection-arguments.png"
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/deployment/index.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/deployment/index.md
index 59c7d88befbb..4a8d5e42fef3 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/deployment/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/deployment/index.md
@@ -70,7 +70,7 @@ information received in the shape of an `IngestionPipeline` Entity, and the spec

 After creating a new workflow from the UI or when editing it, there are two calls happening:
 - `POST` or `PUT` call to update the `Ingestion Pipeline Entity`,
-`/deploy` HTTP call to the `IngestionPipelienResource` to trigger the deployment of the new or updated DAG in the Orchestrator.
+- `/deploy` HTTP call to the `IngestionPipelineResource` to trigger the deployment of the new or updated DAG in the Orchestrator.

 {% image
 src="/images/v1.0/features/ingestion/ingestion-pipeline/ingestion-pipeline-software-system.drawio.png"
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/index.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/index.md
index c85202046404..2b747a8426aa 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/index.md
@@ -236,7 +236,7 @@ configurations specified above.
 ## How to Visualize Test Results

 ### From the Test Suite View
-From the home page click on the Test Suite menu in the left pannel.
+From the home page click on the Test Suite menu in the left panel.

 {% image
 src="/images/v1.0/features/ingestion/workflows/data-quality/test-suite-home-page.png"
@@ -266,7 +266,7 @@ From there you can select a Test Suite and visualize the results associated with
 ### From a Table Entity
 Navigate to your table and click on the `profiler` tab. From there you'll be able to see test results at the table or column level.
 #### Table Level Test Results
-In the top pannel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level
+In the top panel, click on the white background `Data Quality` button.
This will bring you to a summary of all your quality tests at the table level

 {% image
 src="/images/v1.0/features/ingestion/workflows/data-quality/table-results-entity.png"
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/tests.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/tests.md
index 0666ecd5d9fb..69046df96cad 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/tests.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/data-quality/tests.md
@@ -237,7 +237,7 @@ Validate a list of table column name matches an expected set of columns
 | ----------- | ----------- |
 |[`ordered=False`] `columnNames` **matches** the list of column names in the table **regardless of the order**|Success ✅|
 |[`ordered=True`] `columnNames` **matches** the list of column names in the table **in the corresponding order** (e.g. `["a","b"] == ["a","b"]`| Success ✅|
-|[`ordered=fALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order**|Failed ❌|
+|[`ordered=False`] `columnNames` **does not match** the list of column names in the table **regardless of the order**|Failed ❌|
 |[`ordered=True`] `columnNames` **does no match** the list of column names in the table **and/or the corresponding order** (e.g. `["a","b"] != ["b","a"]`|Failed ❌|

 **YAML Config**
@@ -332,7 +332,7 @@ Validate the number of rows inserted for the defined period is between the expec

 {% note %}

-The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. The logic of the test performed will be similar to executiong a Table Row Count to be Between test against a table with a partition configured.
+The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. The logic of the test performed will be similar to executing a Table Row Count to be Between test against a table with a partition configured.

 {% /note %}

@@ -499,7 +499,7 @@ This test allows us to specify how many values in a column we expect that will m
 - mariaDB
 - sqlite
 - clickhouse
-- snowfalke
+- snowflake

 The other databases will fall back to the `LIKE` expression

@@ -546,7 +546,7 @@ This test allows us to specify values in a column we expect that will not match
 - mariaDB
 - sqlite
 - clickhouse
-- snowfalke
+- snowflake

 The other databases will fall back to the `LIKE` expression

diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/dbt-troubleshooting.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/dbt-troubleshooting.md
index 223a3c52c453..c343b7d5ce8f 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/dbt-troubleshooting.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/dbt-troubleshooting.md
@@ -7,7 +7,7 @@ slug: /connectors/ingestion/workflows/dbt/dbt-troubleshooting

 ### 1. dbt tab not displaying in the UI

-After the dbt workflow is finished, check the logs to see if the dbt files were successfuly validated or not. Any missing keys in the manifest.json or catalog.json files will displayed in the logs and those keys are needed to be added.
+After the dbt workflow is finished, check the logs to see if the dbt files were successfully validated or not. Any missing keys in the manifest.json or catalog.json files will be displayed in the logs, and those keys need to be added.
 The dbt workflow requires the below keys to be present in the node of a manifest.json file:
 - resource_type (required)
@@ -15,7 +15,7 @@ The dbt workflow requires the below keys to be present in the node of a manifest
 - schema (required)
 - description (required if description needs to be updated)
 - compiled_code/compiled_sql (required if the dbt model query is to be shown in dbt tab and for query lineage)
-- depends_on (required if lineage information needs to exctracted)
+- depends_on (required if lineage information needs to be extracted)
 - columns (required if column description is to be processed)

 {% note %}
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-cli.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-cli.md
index 80f1a1c97d0c..988366b74b09 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-cli.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-cli.md
@@ -24,7 +24,7 @@ We can create a workflow that will obtain the dbt information from the dbt files
 ### 1. Create the workflow configuration

 Configure the dbt.yaml file according keeping only one of the required source (local, http, gcs, s3).
-The dbt files should be present on the source mentioned and should have the necssary permissions to be able to access the files.
+The dbt files should be present on the source mentioned and should have the necessary permissions to be able to access the files.

 Enter the name of your database service from OpenMetadata in the `serviceName` key in the yaml
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md
index 993eca033a96..8010da7ff8fb 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md
@@ -7,7 +7,7 @@ slug: /connectors/ingestion/workflows/dbt/ingest-dbt-lineage

 Ingest the lineage information from dbt `manifest.json` file into OpenMetadata.

-OpenMetadata exctracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file.
+OpenMetadata extracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file.

 ### 1. Lineage information from dbt "depends_on" key
 Openmetadata fetches the lineage information from the `manifest.json` file. Below is a sample `manifest.json` file node containing lineage information under `node_name->depends_on->nodes`.
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md
index 1429db67d757..fc6bca49a174 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md
@@ -119,7 +119,7 @@ If the owner's name in `manifest.json` or `catalog.json` file is `openmetadata`,

 ## Linking the Owner to the table

-After runing the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file.
+After running the ingestion workflow with dbt, you can see the created user or team getting linked to the table as its owner, as specified in the `manifest.json` or `catalog.json` file.

 {% image
 src="/images/v1.0/features/ingestion/workflows/dbt/ingest_dbt_owner/linked-user.png"
diff --git a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/profiler/index.md b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/profiler/index.md
index c34d13964d36..15dce76871da 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/profiler/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/ingestion/workflows/profiler/index.md
@@ -135,7 +135,7 @@ Once you have picked the `Interval Type` you will need to define the configurati
 - `YEAR`

 `COLUMN-VALUE`
-- `Value`: a list of value to use for the partitionning logic
+- `Value`: a list of values to use for the partitioning logic

 `INTEGER-RANGE`
 - `Start Range`: the start of the range (inclusive)
@@ -376,7 +376,7 @@ Profiling all the tables in your data platform might not be the most optimized a
 When setting up a profiler workflow, you have the possibility to filter out/in certain databases, schemas, or tables. Using this feature will greatly help you narrow down which table you want to profile.

-### 2. Sampling and Partitionning your Tables
+### 2. Sampling and Partitioning your Tables
 On a table asset, you have the possibility to add a sample percentage/rows and a partitioning logic. Doing so will significantly reduce the amount of data scanned and the computing power required to perform the different operations.

 For sampling, you can set a sampling percentage at the workflow level.
diff --git a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/airflow.md b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/airflow.md
index d284465cd94e..0b0dc153ecc1 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/airflow.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/airflow.md
@@ -21,7 +21,7 @@ To deploy OpenMetadata, check the Deployment guides.
 To run the Ingestion via the UI you'll need to use the OpenMetadata Ingestion Container, which comes shipped with
 custom Airflow plugins to handle the workflow deployment.

-**Note:** For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+**Note:** For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ### Python Requirements
diff --git a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/cli.md b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/cli.md
index 2b57d9d98376..d30b484b4f1c 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/cli.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/cli.md
@@ -21,7 +21,7 @@ To deploy OpenMetadata, check the Deployment guides.
 To run the Ingestion via the UI you'll need to use the OpenMetadata Ingestion Container, which comes shipped with
 custom Airflow plugins to handle the workflow deployment.

-**Note:** For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+**Note:** For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ### Python Requirements
diff --git a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/index.md b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/index.md
index ca552ab43b03..81ece82e7767 100644
--- a/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/index.md
+++ b/openmetadata-docs/content/v1.0.x/connectors/pipeline/domo-pipeline/index.md
@@ -40,7 +40,7 @@ custom Airflow plugins to handle the workflow deployment.

-**Note:** For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided.
+**Note:** For metadata ingestion, kindly make sure to add at least the `data` scope to the clientId provided.
 Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication).

 ## Metadata Ingestion
diff --git a/openmetadata-docs/content/v1.0.x/deployment/docker/volumes.md b/openmetadata-docs/content/v1.0.x/deployment/docker/volumes.md
index 66f8cf847c69..f57f232a18d7 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/docker/volumes.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/docker/volumes.md
@@ -56,7 +56,7 @@ services:
 ...
 ```
 ## Volumes for ingestion container
-Following are the changes we have to do while mounting the directory for ingestion in OpenMetadata. Here we will maintaing different directory for dag_generated_configs, dags and secrets.
+Following are the changes we have to do while mounting the directory for ingestion in OpenMetadata. Here we will maintain different directories for dag_generated_configs, dags and secrets.

 - Remove the below section from the docker-compose.yml file. Open the file `docker-compose.yml` downloaded from the Release page [Link](https://github.com/open-metadata/OpenMetadata/releases/download/0.13.0-release/docker-compose.yml) .

@@ -81,7 +81,7 @@ services:
 ...
 ```

-Once these changes are done in the docker-compose.yml file It should look simlarly in the below format
+Once these changes are done in the docker-compose.yml file, it should look similar to the format below

 ```commandline
 version: "3.9"
diff --git a/openmetadata-docs/content/v1.0.x/deployment/kubernetes/gke.md b/openmetadata-docs/content/v1.0.x/deployment/kubernetes/gke.md
index d156842d32f0..76b1c91265b3 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/kubernetes/gke.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/kubernetes/gke.md
@@ -110,7 +110,7 @@ kubectl create -f nfs-server-deployment.yml
 kubectl create -f nfs-cluster-ip-service.yml
 ```

-We create a CluserIP Service for pods to access NFS within the cluster at a fixed IP/DNS.
+We create a ClusterIP Service for pods to access NFS within the cluster at a fixed IP/DNS.

 ### Provision NFS backed PV and PVC for Airflow DAGs and Airflow Logs

diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/bare-metal.md
index c396e6172c82..e75600c67ceb 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/bare-metal.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/bare-metal.md
@@ -20,7 +20,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit
 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host.
Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/docker.md index 53d431c423ff..119ac296d162 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/docker.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/kubernetes.md index 8786e604f828..12d022fb2c2c 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/amazon-cognito-sso/kubernetes.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/bare-metal.md index 28114620851d..4b549551dcd3 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/bare-metal.md @@ -29,7 +29,7 @@ It is important to leave the publicKeys configuration to have both Auth0 public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/docker.md index cb9a7b84b4a2..1d92884e4fee 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/docker.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Auth0 public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/kubernetes.md index da6791ffc187..e71a36b83cb4 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/auth0/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/auth0/kubernetes.md @@ -23,7 +23,7 @@ It is important to leave the publicKeys configuration to have both Auth0 public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/bare-metal.md index 97c71e405d8f..53d72fca4345 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/bare-metal.md @@ -22,7 +22,7 @@ It is important to leave the publicKeys configuration to have both Custom OIDC p 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/docker.md index a9d4ed57e2ba..3141a45f502a 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/docker.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Custom OIDC p 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/kubernetes.md index b841a737f0aa..ed91873c1a65 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/custom-oidc/kubernetes.md @@ -23,7 +23,7 @@ It is important to leave the publicKeys configuration to have both Custom OIDC p 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/google/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/google/bare-metal.md index 86eb321e9f1e..4d555292c901 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/google/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/google/bare-metal.md @@ -34,7 +34,7 @@ It is important to leave the publicKeys configuration to have both google public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/google/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/google/docker.md index d507f4157049..194377c4a3c6 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/google/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/google/docker.md @@ -25,7 +25,7 @@ It is important to leave the publicKeys configuration to have both google public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/google/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/google/kubernetes.md index ee35b52441df..5e1de0ae76a6 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/google/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/google/kubernetes.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both google public 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/bare-metal.md index 518dc62c32bd..c2e9fe4ee59f 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/bare-metal.md @@ -23,7 +23,7 @@ It is important to leave the publicKeys configuration to have both Keycloak publ 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/docker.md index 22937804da42..2966fed6f24a 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/docker.md @@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Keycloak Cogn 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/index.md b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/index.md index 861096ac5c86..fd1ce5c836e5 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/index.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/index.md @@ -43,7 +43,7 @@ Security requirements for your **production** environment: {% image src="/images/v1.0/deployment/security/keycloak/3-add-client.png" alt="add-client" /%} ### Step 4: Edit settings of the client -- Change "Acess Type" value from "public" to "confidential". +- Change "Access Type" value from "public" to "confidential". - Change "implicit flow" and "service accounts" to enabled. {% image src="/images/v1.0/deployment/security/keycloak/4-edit-settings-client.png" alt="edit-settings-client" /%} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/kubernetes.md index 45fd46c36a40..a20046a0460d 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/keycloak/kubernetes.md @@ -27,7 +27,7 @@ It is important to leave the publicKeys configuration to have both Keycloak publ 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/okta/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/okta/bare-metal.md index c7d0fc5b054d..296d88ae8c2d 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/okta/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/okta/bare-metal.md @@ -18,7 +18,7 @@ It is important to leave the publicKeys configuration to have both Okta public k 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/okta/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/okta/docker.md index 84f4a325bac5..c44f724985d8 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/okta/docker.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/okta/docker.md @@ -26,7 +26,7 @@ It is important to leave the publicKeys configuration to have both Okta public k 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/okta/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/okta/kubernetes.md index 6cdbd1dc96aa..915b8e1f003e 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/okta/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/okta/kubernetes.md @@ -25,7 +25,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. 
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens)
+For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens)

 {% /note %}

diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/okta/troubleshoot.md b/openmetadata-docs/content/v1.0.x/deployment/security/okta/troubleshoot.md
index d1eb44a91695..f90624049f1e 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/security/okta/troubleshoot.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/security/okta/troubleshoot.md
@@ -5,7 +5,7 @@ slug: /deployment/security/okta/troubleshoot

 # Troubleshooting Okta SSO

-### Troubleshooting Ingesion with Okta SSO via CLI or Ariflow
+### Troubleshooting Ingestion with Okta SSO via CLI or Airflow

 - **AuthenticationException**: During metadata ingestion process if you face the see the error `AuthenticationException` with message `Could not fetch the access token please validate the orgURL & clientId in configuration`, One of the possible reason for this error could be that you are passing incorrect `clientId` in the `securityConfig`, Make sure you are passing `clientId` of the Ingestion Client (i.e the service application) and not the Single Page Application. If the `clientId` provided is correct and you are still facing this error then please also validate the `orgURL`, expected value for `orgURL` field is `/v1/token`
diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/bare-metal.md
index d878dd85eb01..345dcc978aa5 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/bare-metal.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/bare-metal.md
@@ -22,7 +22,7 @@ It is important to leave the publicKeys configuration to have both OneLogin SSO
 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation.
 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues.

-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens)
+For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens)

 {% /note %}

diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/docker.md b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/docker.md
index 17d11039b9b2..0f1a897ab094 100644
--- a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/docker.md
+++ b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/docker.md
@@ -24,7 +24,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit
 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation.
 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues.
-For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/kubernetes.md index 5dddbda6775a..4a527d2f9d61 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/one-login/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/one-login/kubernetes.md @@ -23,7 +23,7 @@ It is important to leave the publicKeys configuration to have both Amazon Cognit 3. Important to update the URLs documented in below configuration. The below config reflects a setup where all dependencies are hosted in a single host. Example openmetadata:8585 might not be the same domain you may be using in your installation. 4. OpenMetadata ships default public/private key, These must be changed in your production deployment to avoid any security issues. -For more details, follow [Enabling JWT Authenticaiton](deployment/security/enable-jwt-tokens) +For more details, follow [Enabling JWT Authentication](deployment/security/enable-jwt-tokens) {% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/security/saml/index.md b/openmetadata-docs/content/v1.0.x/deployment/security/saml/index.md index afee06b34ace..8fc01846bb19 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/security/saml/index.md +++ b/openmetadata-docs/content/v1.0.x/deployment/security/saml/index.md @@ -75,7 +75,7 @@ Every IDP provides this information, we can download the XML Metadata and config 1. EntityId/Authority -> Normally a Url providing info about the provider. 2. SignOn Url -> Url to be used for signing purpose. -3. X509 Certificate -> In case the SP expects a signed reponse from IDP, the IDP can be configured with Signing Certificate given by SP. +3. X509 Certificate -> In case the SP expects a signed response from IDP, the IDP can be configured with Signing Certificate given by SP. 4. Private Key -> In case SP expects a encrypted response from the IDP , the IDP can be configured with SPs public key for encryption and the Private Key can be used for SP for decrypting. SP Metadata XML is available at "http://localhost:8585/api/v1/saml/metadata", `localhost` needs to be updated with the correct URI. diff --git a/openmetadata-docs/content/v1.0.x/deployment/upgrade/bare-metal.md b/openmetadata-docs/content/v1.0.x/deployment/upgrade/bare-metal.md index 6d2da025a350..6fec7ad2829e 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/upgrade/bare-metal.md +++ b/openmetadata-docs/content/v1.0.x/deployment/upgrade/bare-metal.md @@ -16,7 +16,7 @@ This guide assumes that you have an OpenMetadata deployment that you installed a {% note noteType="Warning" %} -It is adviced to go through [openmetadata release notes](/deployment/upgrade/versions/013-to-100) before starting the upgrade process. +It is advised to go through [openmetadata release notes](/deployment/upgrade/versions/013-to-100) before starting the upgrade process. 
{% /note %} diff --git a/openmetadata-docs/content/v1.0.x/deployment/upgrade/kubernetes.md b/openmetadata-docs/content/v1.0.x/deployment/upgrade/kubernetes.md index 086ccb2cd6fa..826d2815e34e 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/upgrade/kubernetes.md +++ b/openmetadata-docs/content/v1.0.x/deployment/upgrade/kubernetes.md @@ -116,7 +116,7 @@ If you are running into any issues, double-check what are the default values of {% /note %} -## Upgrade OpenMetdata +## Upgrade OpenMetadata We upgrade OpenMetadata with the below command: diff --git a/openmetadata-docs/content/v1.0.x/deployment/upgrade/versions/011-to-012.md b/openmetadata-docs/content/v1.0.x/deployment/upgrade/versions/011-to-012.md index 885c65919b9e..9250101071e4 100644 --- a/openmetadata-docs/content/v1.0.x/deployment/upgrade/versions/011-to-012.md +++ b/openmetadata-docs/content/v1.0.x/deployment/upgrade/versions/011-to-012.md @@ -67,7 +67,7 @@ In the `processor` you can now configure: In OpenMetadata 0.12 we have migrated the metrics computation to multithreading. This migration reduced metrics computation time by 70%. -Snowflake users may experience a circular import error. This is a known issue with `snowflake-connector-python`. If you experience such error we recommend to either 1) run the ingestion workflow in Python 3.8 environment or 2) if you can't manage your environement set `ThreadCount` to 1. You can find more information on the profiler setting [here](/connectors/ingestion/workflows/profiler) +Snowflake users may experience a circular import error. This is a known issue with `snowflake-connector-python`. If you experience such error we recommend to either 1) run the ingestion workflow in Python 3.8 environment or 2) if you can't manage your environment set `ThreadCount` to 1. You can find more information on the profiler setting [here](/connectors/ingestion/workflows/profiler) ### Airflow Version @@ -180,7 +180,7 @@ authorizerConfiguration: ... ``` -- If you are using [docker](/deployment/docker) installation with your custom env file, update all the environement variables from `org.openmetadata.catalog.*` to `org.openmetadata.service.*`. +- If you are using [docker](/deployment/docker) installation with your custom env file, update all the environment variables from `org.openmetadata.catalog.*` to `org.openmetadata.service.*`. ``` AUTHORIZER_CLASS_NAME=org.openmetadata.service.security.DefaultAuthorizer diff --git a/openmetadata-docs/content/v1.0.x/developers/architecture/code-layout.md b/openmetadata-docs/content/v1.0.x/developers/architecture/code-layout.md index 1d7b9b81a1e8..beeeb1e1660c 100644 --- a/openmetadata-docs/content/v1.0.x/developers/architecture/code-layout.md +++ b/openmetadata-docs/content/v1.0.x/developers/architecture/code-layout.md @@ -4,7 +4,7 @@ slug: /developers/architecture/code-layout --- # Understand Code Layout -Use this document as a quick start guide to begin developing in OpenMetdata. Below, we address the following topics: +Use this document as a quick start guide to begin developing in OpenMetadata. Below, we address the following topics: 1. Schema (Metadata Models) 2. 
APIs diff --git a/openmetadata-docs/content/v1.0.x/features/alerts-notifications/index.md b/openmetadata-docs/content/v1.0.x/features/alerts-notifications/index.md index 2e030f26cc5f..e742ac75478a 100644 --- a/openmetadata-docs/content/v1.0.x/features/alerts-notifications/index.md +++ b/openmetadata-docs/content/v1.0.x/features/alerts-notifications/index.md @@ -38,7 +38,7 @@ For slack configuration you will need to get the endpoint URL of the channel whe - **Secret Key**: Secret key can be used to secure the webhook connection. ### MS Teams -For MS Teams configuration you will need to get the endpoint URL if the chanel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: +For MS Teams configuration you will need to get the endpoint URL of the channel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: - **Batch Size**: size of the batch that will be sent to the endpoint. - **Connection Timeout**: timeout for the connection. - **Secret Key**: Secret key can be used to secure the webhook connection. diff --git a/openmetadata-docs/content/v1.0.x/features/data-insight/index.md b/openmetadata-docs/content/v1.0.x/features/data-insight/index.md index 0a17fc0b5f82..b60bfbf81dc3 100644 --- a/openmetadata-docs/content/v1.0.x/features/data-insight/index.md +++ b/openmetadata-docs/content/v1.0.x/features/data-insight/index.md @@ -61,7 +61,7 @@ This chart shows the top 10 data assets the most viewed in your platform. It off /%} **Page views by data assets** -This chart shows the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +This chart shows the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.0/features/data-insight/views-by-assets.png" @@ -118,7 +118,7 @@ Add any elasticsearch configuration relevant to your setup. Note that if you are caption="Data Insight Ingestion ES Config" /%} -Choose a schedule exection time for your workflow. The schedule time is displayed in UTC. We recommend to run this workflow overnight or when activity on the platform is at its lowest to ensure accurate data. +Choose a schedule execution time for your workflow. The schedule time is displayed in UTC.
We recommend running this workflow overnight or when activity on the platform is at its lowest to ensure accurate data. {% image src="/images/v1.0/features/data-insight/data-insight-ingestion-schedule.png" diff --git a/openmetadata-docs/content/v1.0.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md b/openmetadata-docs/content/v1.0.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md index e1d53fdc5cfc..2e8c93fc0717 100644 --- a/openmetadata-docs/content/v1.0.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md +++ b/openmetadata-docs/content/v1.0.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/tests/table/tablecolumntomatchset ## Properties - **`columnNames`** *(string)*: Expected columns of the table to match the ones in {columnValuesSet}. -- **`ordered`** *(boolean)*: Wether or not to considered the order of the list when performing the match. Default: `False`. +- **`ordered`** *(boolean)*: Whether or not to consider the order of the list when performing the match. Default: `False`. Documentation file automatically generated at 2022-07-14 10:51:34.749986. diff --git a/openmetadata-docs/content/v1.0.x/quick-start/local-deployment.md b/openmetadata-docs/content/v1.0.x/quick-start/local-deployment.md index 6d2cc9246bd0..e9ad8290950b 100644 --- a/openmetadata-docs/content/v1.0.x/quick-start/local-deployment.md +++ b/openmetadata-docs/content/v1.0.x/quick-start/local-deployment.md @@ -107,7 +107,7 @@ Follow the instructions [here](https://docs.docker.com/compose/cli-command/#inst - Install [Docker for Windows](https://www.docker.com/products/docker-desktop) - Once installed, please follow the steps [here](https://docs.docker.com/desktop/windows/wsl/) and complete all the pre-requisites for a seamless installation and deployment. - After completion of the pre-requisites, please install `python3-pip` and `python3-venv` on your Ubuntu system. - - Command: `apt install python3-pip python3-venv` (Ensure that you have the priviledge to install packages, if not, please use Super User.) + - Command: `apt install python3-pip python3-venv` (Ensure that you have the privilege to install packages, if not, please use Super User.) ## Procedure diff --git a/openmetadata-docs/content/v1.0.x/releases/roadmap/index.md b/openmetadata-docs/content/v1.0.x/releases/roadmap/index.md index 3acc643f5eea..36dd95a04830 100644 --- a/openmetadata-docs/content/v1.0.x/releases/roadmap/index.md +++ b/openmetadata-docs/content/v1.0.x/releases/roadmap/index.md @@ -37,7 +37,7 @@ You can check the latest release [here](/releases/all-releases). {% tile title="Alerts & Notifications" %} - Durable queue to store ChangeEvents guaranteeing at-least-once semantics -- Live BulkActions on ElasticSearch to handle Tag category deletion, owner chagne propagation etc.. +- Live BulkActions on ElasticSearch to handle Tag category deletion, owner change propagation etc.
- Support to get notifications via email when a user is mentioned {% /tile %} diff --git a/openmetadata-docs/content/v1.1.x/connectors/dashboard/superset/index.md b/openmetadata-docs/content/v1.1.x/connectors/dashboard/superset/index.md index 2ef40a560f04..ee29c1cc8527 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/dashboard/superset/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/dashboard/superset/index.md @@ -29,7 +29,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ## Metadata Ingestion diff --git a/openmetadata-docs/content/v1.1.x/connectors/database/bigquery/roles.md b/openmetadata-docs/content/v1.1.x/connectors/database/bigquery/roles.md index 797e167c50dd..64bde59ddb20 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/database/bigquery/roles.md +++ b/openmetadata-docs/content/v1.1.x/connectors/database/bigquery/roles.md @@ -42,19 +42,21 @@ You can search for the required permissions in the filter box and add them accor -| # | GCP Permission | Required For | -|:----|:------------------------------|:----------------------------------| -| 1 | bigquery.datasets.get | Metadata Ingestion | -| 2 | bigquery.tables.get | Metadata Ingestion | -| 3 | bigquery.tables.getData | Metadata Ingestion | -| 4 | bigquery.tables.list | Metadata Ingestion | -| 5 | resourcemanager.projects.get | Metadata Ingestion | -| 6 | bigquery.jobs.create | Metadata Ingestion | -| 7 | bigquery.jobs.listAll | Metadata Ingestion | -| 8 | datacatalog.taxonomies.get | Fetch Policy Tags | -| 9 | datacatalog.taxonomies.list | Fetch Policy Tags | -| 10 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | -| 11 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | +| # | GCP Permission | Required For | +|:----|:-------------------------------|:----------------------------------| +| 1 | bigquery.datasets.get | Metadata Ingestion | +| 2 | bigquery.tables.get | Metadata Ingestion | +| 3 | bigquery.tables.getData | Metadata Ingestion | +| 4 | bigquery.tables.list | Metadata Ingestion | +| 5 | resourcemanager.projects.get | Metadata Ingestion | +| 6 | bigquery.jobs.create | Metadata Ingestion | +| 7 | bigquery.jobs.listAll | Metadata Ingestion | +| 8 | bigquery.routines.get | Stored Procedure | +| 9 | bigquery.routines.list | Stored Procedure | +| 10 | datacatalog.taxonomies.get | Fetch Policy Tags | +| 11 | datacatalog.taxonomies.list | Fetch Policy Tags | +| 12 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | +| 13 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | {% image src="/images/v1.1/connectors/bigquery/create-role-4.png" diff --git a/openmetadata-docs/content/v1.1.x/connectors/database/mysql/index.md b/openmetadata-docs/content/v1.1.x/connectors/database/mysql/index.md index eae2c92cc124..e48dba111577 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/database/mysql/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/database/mysql/index.md @@ -44,7 +44,7 @@ 
Configure and schedule MySQL metadata and profiler workflows from the OpenMetada Note that We support MySQL (version 8.0.0 or greater) and the user should have access to the `INFORMATION_SCHEMA` table. By default, a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ```SQL --- Create user. If is ommited, defaults to '%' +-- Create user. If is omitted, defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/openmetadata-docs/content/v1.1.x/connectors/database/sap-hana/index.md b/openmetadata-docs/content/v1.1.x/connectors/database/sap-hana/index.md index fe76061979d2..f284c361bbbf 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/database/sap-hana/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/database/sap-hana/index.md @@ -94,7 +94,7 @@ We support two possible connection types: - **database**: Optional parameter to connect to a specific database. - **databaseSchema**: databaseSchema of the data source. This is an optional parameter, if you would like to restrict the metadata reading to a single schema. When left blank, OpenMetadata Ingestion attempts to scan all the schemas. -**HDB USer Store** +**HDB User Store** - **User Key**: HDB Store User Key generated from the command `hdbuserstore SET `. diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/deployment/index.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/deployment/index.md index 269eb18f4509..cb1bf3436b88 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/deployment/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/deployment/index.md @@ -70,7 +70,7 @@ information received in the shape of an `IngestionPipeline` Entity, and the spec After creating a new workflow from the UI or when editing it, there are two calls happening: - `POST` or `PUT` call to update the `Ingestion Pipeline Entity`, -- `/deploy` HTTP call to the `IngestionPipelienResource` to trigger the deployment of the new or updated DAG in the Orchestrator. +- `/deploy` HTTP call to the `IngestionPipelineResource` to trigger the deployment of the new or updated DAG in the Orchestrator. {% image src="/images/v1.1/features/ingestion/ingestion-pipeline/ingestion-pipeline-software-system.drawio.png" diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/index.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/index.md index f7b797f616c1..87adb69faff6 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/index.md @@ -157,7 +157,7 @@ processor: ``` The processor type should be set to ` "orm-test-runner"`. For accepted test definition names and parameter value names refer to the [tests page](/connectors/ingestion/workflows/data-quality/tests). -### Key referece: +### Key reference: - `forceUpdate`: if the test case exists (base on the test case name) for the entity, implements the strategy to follow when running the test (i.e.
whether to update parameters) - `testCases`: list of test cases to execute against the entity referenced - `name`: test case name @@ -184,7 +184,7 @@ processor: config: forceUpdate: false testCases: - - name: column_value_lenght_tagFQN + - name: column_value_length_tagFQN testDefinitionName: columnValueLengthsToBeBetween columnName: tagFQN parameterValues: @@ -303,7 +303,7 @@ From there you can select a Test Suite and visualize the results associated with ### From a Table Entity Navigate to your table and click on the `profiler & Data Quality` tab. From there you'll be able to see test results at the table or column level. #### Table Level Test Results -In the top pannel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level +In the top panel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level {% image src="/images/v1.1/features/ingestion/workflows/data-quality/table-results-entity.png" diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/tests.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/tests.md index b8bed75859fc..1ea2d5181fdb 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/tests.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/data-quality/tests.md @@ -237,7 +237,7 @@ Validate a list of table column name matches an expected set of columns |----------------------------------------------------------------------------------------------------------------------------------------------------------|-----------| | [`ordered=False`] `columnNames` **matches** the list of column names in the table **regardless of the order** | Success ✅ | | [`ordered=True`] `columnNames` **matches** the list of column names in the table **in the corresponding order** (e.g. `["a","b"] == ["a","b"]` | Success ✅ | -| [`ordered=fALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order** | Failed ❌ | +| [`ordered=FALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order** | Failed ❌ | | [`ordered=True`] `columnNames` **does no match** the list of column names in the table **and/or the corresponding order** (e.g. 
`["a","b"] != ["b","a"]` | Failed ❌ | **YAML Config** @@ -499,7 +499,7 @@ This test allows us to specify how many values in a column we expect that will m - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression @@ -546,7 +546,7 @@ This test allows us to specify values in a column we expect that will not match - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md index 8ccd5b5cc9a4..319d74d5be2f 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md @@ -119,7 +119,7 @@ If the owner's name in `manifest.json` or `catalog.json` file is `openmetadata`, ## Linking the Owner to the table -After runing the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. +After running the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. {% image src="/images/v1.1/features/ingestion/workflows/dbt/ingest_dbt_owner/linked-user.png" diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md index 3c55d826da69..d3d039b09423 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md @@ -24,7 +24,7 @@ We can create a workflow that will obtain the dbt information from the dbt files ### 1. Create the workflow configuration Configure the dbt.yaml file according keeping only one of the required source (local, http, gcp, s3). -The dbt files should be present on the source mentioned and should have the necssary permissions to be able to access the files. +The dbt files should be present on the source mentioned and should have the necessary permissions to be able to access the files. Enter the name of your database service from OpenMetadata in the `serviceName` key in the yaml diff --git a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/profiler/index.md b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/profiler/index.md index e2b535942ffa..0ca88675fd7d 100644 --- a/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/profiler/index.md +++ b/openmetadata-docs/content/v1.1.x/connectors/ingestion/workflows/profiler/index.md @@ -135,7 +135,7 @@ Once you have picked the `Interval Type` you will need to define the configurati - `YEAR` `COLUMN-VALUE` -- `Value`: a list of value to use for the partitionning logic +- `Value`: a list of value to use for the partitioning logic `INTEGER-RANGE` - `Start Range`: the start of the range (inclusive) @@ -375,7 +375,7 @@ Profiling all the tables in your data platform might not be the most optimized a When setting up a profiler workflow, you have the possibility to filter out/in certain databases, schemas, or tables. Using this feature will greatly help you narrow down which table you want to profile. 
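To make the filtering described above concrete, a minimal profiler source sketch could look like the following. The service type, service name, and regex patterns here are placeholders, and the exact keys should be confirmed against the profiler workflow configuration reference.

```yaml
source:
  type: mysql                      # placeholder service type
  serviceName: my_mysql_service    # placeholder service name
  sourceConfig:
    config:
      type: Profiler
      schemaFilterPattern:
        includes:
          - sales_.*               # only profile schemas matching this regex
      tableFilterPattern:
        excludes:
          - .*_staging             # skip staging tables
```

Narrowing the scope this way keeps the profiler from scanning assets you never intend to monitor.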
-### 2. Sampling and Partitionning your Tables +### 2. Sampling and Partitioning your Tables On a table asset, you have the possibility to add a sample percentage/rows and a partitioning logic. Doing so will significantly reduce the amount of data scanned and the computing power required to perform the different operations. For sampling, you can set a sampling percentage at the workflow level. diff --git a/openmetadata-docs/content/v1.1.x/deployment/kubernetes/gke.md b/openmetadata-docs/content/v1.1.x/deployment/kubernetes/gke.md index fb7179405be9..963f2a64318e 100644 --- a/openmetadata-docs/content/v1.1.x/deployment/kubernetes/gke.md +++ b/openmetadata-docs/content/v1.1.x/deployment/kubernetes/gke.md @@ -195,7 +195,7 @@ kubectl create -f nfs-server-deployment.yml kubectl create -f nfs-cluster-ip-service.yml ``` -We create a CluserIP Service for pods to access NFS within the cluster at a fixed IP/DNS. +We create a ClusterIP Service for pods to access NFS within the cluster at a fixed IP/DNS. ### Provision NFS backed PV and PVC for Airflow DAGs and Airflow Logs diff --git a/openmetadata-docs/content/v1.1.x/deployment/security/saml/index.md b/openmetadata-docs/content/v1.1.x/deployment/security/saml/index.md index edc74271e5b4..5895ea795875 100644 --- a/openmetadata-docs/content/v1.1.x/deployment/security/saml/index.md +++ b/openmetadata-docs/content/v1.1.x/deployment/security/saml/index.md @@ -75,7 +75,7 @@ Every IDP provides this information, we can download the XML Metadata and config 1. EntityId/Authority -> Normally a URL providing info about the provider. 2. SignOn Url -> Url to be used for signing purpose. -3. X509 Certificate -> In case the SP expects a signed reponse from IDP, the IDP can be configured with Signing Certificate given by SP. +3. X509 Certificate -> In case the SP expects a signed response from IDP, the IDP can be configured with Signing Certificate given by SP. 4. Private Key -> In case SP expects an encrypted response from the IDP , the IDP can be configured with SPs public key for encryption and the Private Key can be used for SP for decrypting. SP Metadata XML is available at "http://localhost:8585/api/v1/saml/metadata", `localhost` needs to be updated with the correct URI. diff --git a/openmetadata-docs/content/v1.1.x/developers/architecture/code-layout.md b/openmetadata-docs/content/v1.1.x/developers/architecture/code-layout.md index 432eb1fbb040..9dd666b7c3ce 100644 --- a/openmetadata-docs/content/v1.1.x/developers/architecture/code-layout.md +++ b/openmetadata-docs/content/v1.1.x/developers/architecture/code-layout.md @@ -4,7 +4,7 @@ slug: /developers/architecture/code-layout --- # Understand Code Layout -Use this document as a quick start guide to begin developing in OpenMetdata. Below, we address the following topics: +Use this document as a quick start guide to begin developing in OpenMetadata. Below, we address the following topics: 1. Schema (Metadata Models) 2. APIs diff --git a/openmetadata-docs/content/v1.1.x/features/alerts-notifications/index.md b/openmetadata-docs/content/v1.1.x/features/alerts-notifications/index.md index eb3769b8f9ed..9b4b173f81a3 100644 --- a/openmetadata-docs/content/v1.1.x/features/alerts-notifications/index.md +++ b/openmetadata-docs/content/v1.1.x/features/alerts-notifications/index.md @@ -38,7 +38,7 @@ For slack configuration you will need to get the endpoint URL of the channel whe - **Secret Key**: Secret key can be used to secure the webhook connection. 
### MS Teams -For MS Teams configuration you will need to get the endpoint URL if the chanel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: +For MS Teams configuration you will need to get the endpoint URL of the channel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: - **Batch Size**: size of the batch that will be sent to the endpoint. - **Connection Timeout**: timeout for the connection. - **Secret Key**: Secret key can be used to secure the webhook connection. diff --git a/openmetadata-docs/content/v1.1.x/features/data-insight/index.md b/openmetadata-docs/content/v1.1.x/features/data-insight/index.md index f13d09e23285..38e28a5161b1 100644 --- a/openmetadata-docs/content/v1.1.x/features/data-insight/index.md +++ b/openmetadata-docs/content/v1.1.x/features/data-insight/index.md @@ -61,7 +61,7 @@ This chart shows the top 10 data assets the most viewed in your platform. It off /%} **Page views by data assets** -This chart shows the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +This chart shows the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.1/features/data-insight/views-by-assets.png" diff --git a/openmetadata-docs/content/v1.1.x/how-to-guides/data-collaboration/index.md b/openmetadata-docs/content/v1.1.x/how-to-guides/data-collaboration/index.md index b589103f05ae..882a188e23d5 100644 --- a/openmetadata-docs/content/v1.1.x/how-to-guides/data-collaboration/index.md +++ b/openmetadata-docs/content/v1.1.x/how-to-guides/data-collaboration/index.md @@ -10,7 +10,7 @@ OpenMetadata is a catalyst for collaboration that brings data teams together to There are three important aspects of data collaboration in OpenMetadata: - **Conversations Threads:** Collaborate around data assets and tags by asking the right questions and discussing the details right within OpenMetadata. -- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initaite a glossary term approval workflow. +- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initiate a glossary term approval workflow.
- **Announcements:** Announce to your entire team about the upcoming events and changes such as deprecation, deletion, or schema changes. diff --git a/openmetadata-docs/content/v1.1.x/how-to-guides/data-discovery/discover.md b/openmetadata-docs/content/v1.1.x/how-to-guides/data-discovery/discover.md index 3242136d7626..408f489fc3e2 100644 --- a/openmetadata-docs/content/v1.1.x/how-to-guides/data-discovery/discover.md +++ b/openmetadata-docs/content/v1.1.x/how-to-guides/data-discovery/discover.md @@ -34,7 +34,7 @@ alt="Filter by the Type of Data Asset" caption="Filter by the Type of Data Asset" /%} -Users can navigate to the Explore page for specific type of data assets and use the filter options relevant to that data assset to narrow down the search. +Users can navigate to the Explore page for specific type of data assets and use the filter options relevant to that data asset to narrow down the search. ## Filter by Asset Owner A team or a user can own the data asset in OpenMetadata. Users can filter data assets by the asset owner. With information on the data asset owners, you can direct your questions to the right person or team. diff --git a/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/data-culture.md b/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/data-culture.md index df57705c2540..4679c6cfa4f5 100644 --- a/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/data-culture.md +++ b/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/data-culture.md @@ -22,7 +22,7 @@ Data is a shared responsibility of the organization and requires an end-to-end a ### 1. Data Needs Clear Ownership -All important data must be owned. Individuals should not own important data assets. Team ownership is preffered over User ownership. It also pushes the data responsibility to a team instead of an individual user. +All important data must be owned. Individuals should not own important data assets. Team ownership is preferred over User ownership. It also pushes the data responsibility to a team instead of an individual user. ### 2. Measure What Matters @@ -60,7 +60,7 @@ Data without description is hard to use, resulting in the loss of productivity. ### 4. Develop Data Vocabulary -Data vocabulary helps in the consistent understanding of data. In OpenMetdata, using the [Glossary](/how-to-guides/data-governance/glossary-classification) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. +Data vocabulary helps in the consistent understanding of data. In OpenMetadata, using the [Glossary](/how-to-guides/data-governance/glossary-classification) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. ### 5 Identify Important Data with Tiers diff --git a/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/report.md b/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/report.md index e9c7a72076c6..4d63d0f615d1 100644 --- a/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/report.md +++ b/openmetadata-docs/content/v1.1.x/how-to-guides/data-insights/report.md @@ -96,7 +96,7 @@ caption="Most Viewed Data Assets" ### Page Views by Data Assets -It helps to understand the total number of page views by asset type. 
This allows you to understand which asset familly drives the most interest in your organization +It helps to understand the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.1/how-to-guides/insights/pvda.png" diff --git a/openmetadata-docs/content/v1.1.x/how-to-guides/user-guide-data-stewards/overview-data-assets/data-ownership.md b/openmetadata-docs/content/v1.1.x/how-to-guides/user-guide-data-stewards/overview-data-assets/data-ownership.md index bc2ab8bd2c63..096f7917ecd1 100644 --- a/openmetadata-docs/content/v1.1.x/how-to-guides/user-guide-data-stewards/overview-data-assets/data-ownership.md +++ b/openmetadata-docs/content/v1.1.x/how-to-guides/user-guide-data-stewards/overview-data-assets/data-ownership.md @@ -38,12 +38,12 @@ If no owner is selected, and if the Database or Database Schema has a owner, the OpenMetadata supports Owner Propagation and the owner will be propagated based on a top-down hierarchy. The owner of the Database will be auto-propagated as the owner of the Database Schemas and Tables under it. Similarly, the owner of the Database Schema will be auto-propagated as the owner of the Tables under it. -- Owner Propogation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. +- Owner Propagation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. - If a Database or Database Schema has an Owner assigned, and you **delete the owner** from the Database Schema or Tables under it, then the Owner will be auto-assigned in this case based on the existing Owner details at the top hierarchy. - You can also assign a different owner manually. -## Team Ownership is Preffered +## Team Ownership is Preferred OpenMetadata is a data collaboration platform. We highly recommend Team Ownership of data assets, because individual users will only have part of the context about the data asset in question. Assigning team ownership will give access to all the members of a particular team. Only teams of the type ‘**Groups**’ can own data assets. 
\ No newline at end of file diff --git a/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md b/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md similarity index 97% rename from openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md rename to openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md index 6bf330fe7c41..f60b6daf70dd 100644 --- a/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md +++ b/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md @@ -1,5 +1,5 @@ --- -title: adlsConection +title: adlsConnection slug: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection --- diff --git a/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/type/function.md b/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/type/function.md index a7f9fa3360ba..8f9adfa909c5 100644 --- a/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/type/function.md +++ b/openmetadata-docs/content/v1.1.x/main-concepts/metadata-standard/schemas/type/function.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/type/function - **`name`** *(string)*: Name of the function. - **`input`** *(string)*: Description of input taken by the function. -- **`description`** *(string)*: Description fo the function. +- **`description`** *(string)*: Description for the function. - **`examples`** *(array)*: Examples of the function to help users author conditions. - **`parameterInputType`**: List of receivers to send mail to. Refer to *#/definitions/parameterType*. - **`paramAdditionalContext`**: Refer to *#/definitions/paramAdditionalContext*. 
diff --git a/openmetadata-docs/content/v1.1.x/menu.md b/openmetadata-docs/content/v1.1.x/menu.md index 4fe600bca472..368463a4847c 100644 --- a/openmetadata-docs/content/v1.1.x/menu.md +++ b/openmetadata-docs/content/v1.1.x/menu.md @@ -1281,7 +1281,7 @@ site_menu: url: /main-concepts/metadata-standard/schemas/entity/services/connections/pipeline/splineconnection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / ServiceConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/serviceconnection - - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConection + - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / CustomStorageConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/customstorageconnection diff --git a/openmetadata-docs/content/v1.1.x/quick-start/local-docker-deployment.md b/openmetadata-docs/content/v1.1.x/quick-start/local-docker-deployment.md index ca1832d2e4ff..175d67753343 100644 --- a/openmetadata-docs/content/v1.1.x/quick-start/local-docker-deployment.md +++ b/openmetadata-docs/content/v1.1.x/quick-start/local-docker-deployment.md @@ -95,7 +95,7 @@ Follow the instructions [here](https://docs.docker.com/compose/cli-command/#inst - Install [Docker for Windows](https://www.docker.com/products/docker-desktop) - Once installed, please follow the steps [here](https://docs.docker.com/desktop/windows/wsl/) and complete all the pre-requisites for a seamless installation and deployment. - After completion of the pre-requisites, please install `python3-pip` and `python3-venv` on your Ubuntu system. - - Command: `apt install python3-pip python3-venv` (Ensure that you have the priviledge to install packages, if not, please use Super User.) + - Command: `apt install python3-pip python3-venv` (Ensure that you have the privilege to install packages, if not, please use Super User.) ## Procedure diff --git a/openmetadata-docs/content/v1.1.x/sdk/python/ingestion/lineage.md b/openmetadata-docs/content/v1.1.x/sdk/python/ingestion/lineage.md index 07c8b5b3d441..6bbe892ddac8 100644 --- a/openmetadata-docs/content/v1.1.x/sdk/python/ingestion/lineage.md +++ b/openmetadata-docs/content/v1.1.x/sdk/python/ingestion/lineage.md @@ -437,7 +437,7 @@ workflowConfig: authProvider: ``` -- **serviceName**: Name of the database service which contains tha table involved in query. +- **serviceName**: Name of the database service which contains the table involved in query. - **query**: You can specify the raw sql query within the yaml file itself. - **filePath**: In case the query is too big then you can also save query in a file and pass the path to the file in this field. - **parseTimeout**: Timeout for the lineage parsing process. 
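Putting the lineage keys described above together, a minimal workflow sketch might look like this. The service name and query are placeholders, and the `workflowConfig` block is assumed to follow the usual `openMetadataServerConfig` shape used across these docs.

```yaml
serviceName: my_database_service   # placeholder: service containing the tables in the query
query: "INSERT INTO sales.orders SELECT * FROM staging.orders"
# filePath: /path/to/query.sql     # alternative to an inline query for large statements
parseTimeout: 300                  # seconds allowed for lineage parsing
workflowConfig:
  openMetadataServerConfig:
    hostPort: http://localhost:8585/api
    authProvider: openmetadata
```

Use either `query` or `filePath`, not both, depending on how large the statement is.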
diff --git a/openmetadata-docs/content/v1.2.x/connectors/dashboard/domo-dashboard/yaml.md b/openmetadata-docs/content/v1.2.x/connectors/dashboard/domo-dashboard/yaml.md index 7fa2debcf7e9..8dcb96f5cc1a 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/dashboard/domo-dashboard/yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/dashboard/domo-dashboard/yaml.md @@ -30,7 +30,7 @@ Configure and schedule DomoDashboard metadata and profiler workflows from the Op To deploy OpenMetadata, check the Deployment guides. {%/inlineCallout%} -**Note:** For metadata ingestion, kindly make sure add alteast `dashboard` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add atleast `dashboard` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/index.md b/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/index.md index cf15d8b6d1a5..bfd7bc56716e 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/index.md @@ -29,7 +29,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ## Metadata Ingestion diff --git a/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/yaml.md b/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/yaml.md index 30180f93b3ba..fb9d996b56ef 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/dashboard/superset/yaml.md @@ -36,7 +36,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ### Python Requirements diff --git a/openmetadata-docs/content/v1.2.x/connectors/dashboard/tableau/yaml.md b/openmetadata-docs/content/v1.2.x/connectors/dashboard/tableau/yaml.md index 766f34941dc4..038b3d02b197 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/dashboard/tableau/yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/dashboard/tableau/yaml.md @@ -26,7 +26,7 @@ Configure and schedule Tableau metadata and profiler workflows from the OpenMeta ## Requirements -To ingest tableau metadata, minimum `Site Role: Viewer` is requried for the tableau user. +To ingest tableau metadata, minimum `Site Role: Viewer` is required for the tableau user. 
{%inlineCallout icon="description" bold="OpenMetadata 0.12 or later" href="/deployment"%} To deploy OpenMetadata, check the Deployment guides. diff --git a/openmetadata-docs/content/v1.2.x/connectors/database/bigquery/roles.md b/openmetadata-docs/content/v1.2.x/connectors/database/bigquery/roles.md index e4eb8105c2d3..b98d6a6f4adf 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/database/bigquery/roles.md +++ b/openmetadata-docs/content/v1.2.x/connectors/database/bigquery/roles.md @@ -51,10 +51,12 @@ You can search for the required permissions in the filter box and add them accor | 5 | resourcemanager.projects.get | Metadata Ingestion | | 6 | bigquery.jobs.create | Metadata Ingestion | | 7 | bigquery.jobs.listAll | Metadata Ingestion | -| 8 | datacatalog.taxonomies.get | Fetch Policy Tags | -| 9 | datacatalog.taxonomies.list | Fetch Policy Tags | -| 10 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | -| 11 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | +| 8 | bigquery.routines.get | Stored Procedure | +| 9 | bigquery.routines.list | Stored Procedure | +| 10 | datacatalog.taxonomies.get | Fetch Policy Tags | +| 11 | datacatalog.taxonomies.list | Fetch Policy Tags | +| 12 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | +| 13 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | {% image src="/images/v1.2/connectors/bigquery/create-role-4.png" diff --git a/openmetadata-docs/content/v1.2.x/connectors/database/domo-database/yaml.md b/openmetadata-docs/content/v1.2.x/connectors/database/domo-database/yaml.md index 27942cdeaa75..7e42c3c4fae1 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/database/domo-database/yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/database/domo-database/yaml.md @@ -47,7 +47,7 @@ To deploy OpenMetadata, check the Deployment guides. **Note:** -For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided. +For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). @@ -63,7 +63,7 @@ pip3 install "openmetadata-ingestion[domo]" All connectors are defined as JSON Schemas. [Here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/athenaConnection.json) -you can find the structure to create a connection to DomoDatbase. +you can find the structure to create a connection to DomoDatabase. In order to create and run a Metadata Ingestion workflow, we will follow the steps to create a YAML configuration able to connect to the source, diff --git a/openmetadata-docs/content/v1.2.x/connectors/database/mysql/index.md b/openmetadata-docs/content/v1.2.x/connectors/database/mysql/index.md index 76ba35a5014d..ef6418a89467 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/database/mysql/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/database/mysql/index.md @@ -44,7 +44,7 @@ Configure and schedule MySQL metadata and profiler workflows from the OpenMetada Note that We support MySQL (version 8.0.0 or greater) and the user should have access to the `INFORMATION_SCHEMA` table. By default a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ```SQL --- Create user. If is ommited, defaults to '%' +-- Create user. 
If is omitted, defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/openmetadata-docs/content/v1.2.x/connectors/database/redshift/troubleshooting.md b/openmetadata-docs/content/v1.2.x/connectors/database/redshift/troubleshooting.md index 5b3e13423358..d94c367c367c 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/database/redshift/troubleshooting.md +++ b/openmetadata-docs/content/v1.2.x/connectors/database/redshift/troubleshooting.md @@ -13,7 +13,7 @@ connection to server at \":\" (@IP), does not match host name \":\" ``` -If you get this error that time plese pass `{'sslmode': 'verify-ca'}` in the connection arguments. +If you get this error, please pass `{'sslmode': 'verify-ca'}` in the connection arguments. {% image src="/images/v1.2/connectors/redshift/service-connection-arguments.png" diff --git a/openmetadata-docs/content/v1.2.x/connectors/database/sap-hana/index.md b/openmetadata-docs/content/v1.2.x/connectors/database/sap-hana/index.md index 2a13a8fff4b0..20dba07eeba2 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/database/sap-hana/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/database/sap-hana/index.md @@ -94,7 +94,7 @@ We support two possible connection types: - **database**: Optional parameter to connect to a specific database. - **databaseSchema**: databaseSchema of the data source. This is an optional parameter, if you would like to restrict the metadata reading to a single schema. When left blank, OpenMetadata Ingestion attempts to scan all the schemas. -**HDB USer Store** +**HDB User Store** - **User Key**: HDB Store User Key generated from the command `hdbuserstore SET `. diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/deployment/index.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/deployment/index.md index c4a8fec30655..f0a6d2c61673 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/deployment/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/deployment/index.md @@ -70,7 +70,7 @@ information received in the shape of an `IngestionPipeline` Entity, and the spec After creating a new workflow from the UI or when editing it, there are two calls happening: - `POST` or `PUT` call to update the `Ingestion Pipeline Entity`, -- `/deploy` HTTP call to the `IngestionPipelienResource` to trigger the deployment of the new or updated DAG in the Orchestrator. +- `/deploy` HTTP call to the `IngestionPipelineResource` to trigger the deployment of the new or updated DAG in the Orchestrator. {% image src="/images/v1.2/features/ingestion/ingestion-pipeline/ingestion-pipeline-software-system.drawio.png" diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/index.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/index.md index 21ad6da07a86..61918ee75528 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/index.md @@ -48,7 +48,7 @@ Test Definitions are generic tests definition elements specific to a test such a Test Cases specify a Test Definition. It will define what condition a test must meet to be successful (e.g. `max=n`, etc.). One Test Definition can be linked to multiple Test Cases. ## Adding Test Cases to an Entity -Tests cases are actual test that will be ran and executed against your entity.
This is where you will define the excution time and logic of these tests +Tests cases are actual test that will be ran and executed against your entity. This is where you will define the execution time and logic of these tests **Note:** you will need to make sure you have the right permission in OpenMetadata to create a test. ## Step 1: Creating a Test Case @@ -157,7 +157,7 @@ processor: ``` The processor type should be set to ` "orm-test-runner"`. For accepted test definition names and parameter value names refer to the [tests page](/connectors/ingestion/workflows/data-quality/tests). -### Key referece: +### Key reference: - `forceUpdate`: if the test case exists (base on the test case name) for the entity, implements the strategy to follow when running the test (i.e. whether or not to update parameters) - `testCases`: list of test cases to execute against the entity referenced - `name`: test case name @@ -184,7 +184,7 @@ processor: config: forceUpdate: false testCases: - - name: column_value_lenght_tagFQN + - name: column_value_length_tagFQN testDefinitionName: columnValueLengthsToBeBetween columnName: tagFQN parameterValues: @@ -304,7 +304,7 @@ From there you can select a Test Suite and visualize the results associated with ### From a Table Entity Navigate to your table and click on the `profiler & Data Quality` tab. From there you'll be able to see test results at the table or column level. #### Table Level Test Results -In the top pannel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level +In the top panel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level {% image src="/images/v1.2/features/ingestion/workflows/data-quality/table-results-entity.png" diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/tests.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/tests.md index 0666ecd5d9fb..4c11bd6b1552 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/tests.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/data-quality/tests.md @@ -237,7 +237,7 @@ Validate a list of table column name matches an expected set of columns | ----------- | ----------- | |[`ordered=False`] `columnNames` **matches** the list of column names in the table **regardless of the order**|Success ✅| |[`ordered=True`] `columnNames` **matches** the list of column names in the table **in the corresponding order** (e.g. `["a","b"] == ["a","b"]`| Success ✅| -|[`ordered=fALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order**|Failed ❌| +|[`ordered=FALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order**|Failed ❌| |[`ordered=True`] `columnNames` **does no match** the list of column names in the table **and/or the corresponding order** (e.g. `["a","b"] != ["b","a"]`|Failed ❌| **YAML Config** @@ -332,7 +332,7 @@ Validate the number of rows inserted for the defined period is between the expec {% note %} -The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. The logic of the test performed will be similar to executiong a Table Row Count to be Between test against a table with a partition configured. 
+The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. The logic of the test performed will be similar to executing a Table Row Count to be Between test against a table with a partition configured. {% /note %} @@ -499,7 +499,7 @@ This test allows us to specify how many values in a column we expect that will m - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression @@ -546,7 +546,7 @@ This test allows us to specify values in a column we expect that will not match - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression @@ -1050,11 +1050,11 @@ Validate the sum of a column is between a specific range **YAML Config** ```yaml -testDefinitionName: columnValueMedianToBeBetween +testDefinitionName: columnValueSumToBeBetween parameterValues: - - name: minValueForMedianInCol + - name: minValueForColSum value: 5 - - name: maxValueForMedianInCol + - name: maxValueForColSum value: 10 ``` diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md index 2b71addcf0b4..db315f1ce18b 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md @@ -7,7 +7,7 @@ slug: /connectors/ingestion/workflows/dbt/ingest-dbt-lineage Ingest the lineage information from dbt `manifest.json` file into OpenMetadata. -OpenMetadata exctracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file. +OpenMetadata extracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file. ### 1. Lineage information from dbt "depends_on" key Openmetadata fetches the lineage information from the `manifest.json` file. Below is a sample `manifest.json` file node containing lineage information under `node_name->depends_on->nodes`. diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md index badeec052586..d896608adf69 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md @@ -119,7 +119,7 @@ If the owner's name in `manifest.json` or `catalog.json` file is `openmetadata`, ## Linking the Owner to the table -After runing the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. +After running the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. 
{% image src="/images/v1.2/features/ingestion/workflows/dbt/ingest_dbt_owner/linked-user.png" diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md index ba712bd0b9db..412a7f749697 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md @@ -24,7 +24,7 @@ We can create a workflow that will obtain the dbt information from the dbt files ### 1. Create the workflow configuration Configure the dbt.yaml file according keeping only one of the required source (local, http, gcp, s3). -The dbt files should be present on the source mentioned and should have the necssary permissions to be able to access the files. +The dbt files should be present on the source mentioned and should have the necessary permissions to be able to access the files. Enter the name of your database service from OpenMetadata in the `serviceName` key in the yaml diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/external_workflow.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/external_workflow.md index 8096e4efef7c..966f5db4bf8e 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/external_workflow.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/external_workflow.md @@ -107,7 +107,7 @@ processor: # storageConfig: # awsRegion: us-east-2 # awsAccessKeyId: - # awsSecretAccessKey: + # awsSecretAccessKey: # awsSessionToken: # assumeRoleArn: # assumeRoleSessionName: @@ -125,7 +125,7 @@ processor: # storageConfig: # awsRegion: us-east-2 # awsAccessKeyId: - # awsSecretAccessKey: + # awsSecretAccessKey: # awsSessionToken: # assumeRoleArn: # assumeRoleSessionName: diff --git a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/index.md b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/index.md index c11db5b412fa..7de12f73ab49 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/index.md +++ b/openmetadata-docs/content/v1.2.x/connectors/ingestion/workflows/profiler/index.md @@ -67,7 +67,7 @@ Set the sample to be use by the profiler for the specific table. - `Percentage`: Value must be between 0 and 100 exclusive (0 < percentage < 100). This will sample the table based on a percentage - `Row Count`: The table will be sampled based on a number of rows (i.e. `1,000`, `2,000`), etc. -⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitionning or sample query option if you need to limit the amount of data scanned. +⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitioning or sample query option if you need to limit the amount of data scanned. **Auto PII Tagging (Optional)** Configuration to automatically tag columns that might contain sensitive information. @@ -109,7 +109,7 @@ Set the sample to be use by the profiler for the specific table. - `Percentage`: Value must be between 0 and 100 exclusive (0 < percentage < 100). 
This will sample the table based on a percentage - `Row Count`: The table will be sampled based on a number of rows (i.e. `1,000`, `2,000`), etc. -⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitionning or sample query option if you need to limit the amount of data scanned. +⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitioning or sample query option if you need to limit the amount of data scanned. **Profile Sample Query** Use a query to sample data for the profiler. This will overwrite any profle sample set. @@ -139,7 +139,7 @@ Once you have picked the `Interval Type` you will need to define the configurati - `YEAR` `COLUMN-VALUE` -- `Value`: a list of value to use for the partitionning logic +- `Value`: a list of value to use for the partitioning logic `INTEGER-RANGE` - `Start Range`: the start of the range (inclusive) @@ -378,7 +378,7 @@ Profiling all the tables in your data platform might not be the most optimized a When setting up a profiler workflow, you have the possibility to filter out/in certain databases, schemas, or tables. Using this feature will greatly help you narrow down which table you want to profile. -### 2. Sampling and Partitionning your Tables +### 2. Sampling and Partitioning your Tables On a table asset, you have the possibility to add a sample percentage/rows and a partitioning logic. Doing so will significantly reduce the amount of data scanned and the computing power required to perform the different operations. For sampling, you can set a sampling percentage at the workflow level. diff --git a/openmetadata-docs/content/v1.2.x/connectors/pipeline/domo-pipeline/yaml.md b/openmetadata-docs/content/v1.2.x/connectors/pipeline/domo-pipeline/yaml.md index 0e4fad5a6443..15039e2b3053 100644 --- a/openmetadata-docs/content/v1.2.x/connectors/pipeline/domo-pipeline/yaml.md +++ b/openmetadata-docs/content/v1.2.x/connectors/pipeline/domo-pipeline/yaml.md @@ -22,7 +22,7 @@ To deploy OpenMetadata, check the Deployment guides. -**Note:** For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/openmetadata-docs/content/v1.2.x/deployment/kubernetes/gke.md b/openmetadata-docs/content/v1.2.x/deployment/kubernetes/gke.md index 16d9bcd5cc96..a9d8d47989b1 100644 --- a/openmetadata-docs/content/v1.2.x/deployment/kubernetes/gke.md +++ b/openmetadata-docs/content/v1.2.x/deployment/kubernetes/gke.md @@ -194,7 +194,7 @@ kubectl create -f nfs-server-deployment.yml kubectl create -f nfs-cluster-ip-service.yml ``` -We create a CluserIP Service for pods to access NFS within the cluster at a fixed IP/DNS. +We create a ClusterIP Service for pods to access NFS within the cluster at a fixed IP/DNS. 
### Provision NFS backed PV and PVC for Airflow DAGs and Airflow Logs diff --git a/openmetadata-docs/content/v1.2.x/deployment/security/keycloak/index.md b/openmetadata-docs/content/v1.2.x/deployment/security/keycloak/index.md index b23f0d539992..89855537f572 100644 --- a/openmetadata-docs/content/v1.2.x/deployment/security/keycloak/index.md +++ b/openmetadata-docs/content/v1.2.x/deployment/security/keycloak/index.md @@ -43,7 +43,7 @@ Security requirements for your **production** environment: {% image src="/images/v1.2/deployment/security/keycloak/3-add-client.png" alt="add-client" /%} ### Step 4: Edit settings of the client -- Change "Acess Type" value from "public" to "confidential". +- Change "Access Type" value from "public" to "confidential". - Change "implicit flow" and "service accounts" to enabled. {% image src="/images/v1.2/deployment/security/keycloak/4-edit-settings-client.png" alt="edit-settings-client" /%} diff --git a/openmetadata-docs/content/v1.2.x/deployment/security/saml/index.md b/openmetadata-docs/content/v1.2.x/deployment/security/saml/index.md index bb67031fd2ca..bd086521b2e1 100644 --- a/openmetadata-docs/content/v1.2.x/deployment/security/saml/index.md +++ b/openmetadata-docs/content/v1.2.x/deployment/security/saml/index.md @@ -75,7 +75,7 @@ Every IDP provides this information, we can download the XML Metadata and config 1. EntityId/Authority -> Normally a Url providing info about the provider. 2. SignOn Url -> Url to be used for signing purpose. -3. X509 Certificate -> In case the SP expects a signed reponse from IDP, the IDP can be configured with Signing Certificate given by SP. +3. X509 Certificate -> In case the SP expects a signed response from IDP, the IDP can be configured with Signing Certificate given by SP. 4. Private Key -> In case SP expects a encrypted response from the IDP , the IDP can be configured with SPs public key for encryption and the Private Key can be used for SP for decrypting. SP Metadata XML is available at "http://localhost:8585/api/v1/saml/metadata", `localhost` needs to be updated with the correct URI. diff --git a/openmetadata-docs/content/v1.2.x/developers/architecture/code-layout.md b/openmetadata-docs/content/v1.2.x/developers/architecture/code-layout.md index c42f95ad7e71..21239d2612af 100644 --- a/openmetadata-docs/content/v1.2.x/developers/architecture/code-layout.md +++ b/openmetadata-docs/content/v1.2.x/developers/architecture/code-layout.md @@ -4,7 +4,7 @@ slug: /developers/architecture/code-layout --- # Understand Code Layout -Use this document as a quick start guide to begin developing in OpenMetdata. Below, we address the following topics: +Use this document as a quick start guide to begin developing in OpenMetadata. Below, we address the following topics: 1. Schema (Metadata Models) 2. APIs diff --git a/openmetadata-docs/content/v1.2.x/features/alerts-notifications/index.md b/openmetadata-docs/content/v1.2.x/features/alerts-notifications/index.md index 57a162a941d0..23f9ed75798a 100644 --- a/openmetadata-docs/content/v1.2.x/features/alerts-notifications/index.md +++ b/openmetadata-docs/content/v1.2.x/features/alerts-notifications/index.md @@ -38,7 +38,7 @@ For slack configuration you will need to get the endpoint URL of the channel whe - **Secret Key**: Secret key can be used to secure the webhook connection. ### MS Teams -For MS Teams configuration you will need to get the endpoint URL if the chanel where you wish to send the alerts. 
You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: +For MS Teams configuration you will need to get the endpoint URL if the channel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: - **Batch Size**: size of the batch that will be sent to the endpoint. - **Connection Timeout**: timeout for the connection. - **Secret Key**: Secret key can be used to secure the webhook connection. diff --git a/openmetadata-docs/content/v1.2.x/features/data-insight/index.md b/openmetadata-docs/content/v1.2.x/features/data-insight/index.md index a817b67e9641..995514b79434 100644 --- a/openmetadata-docs/content/v1.2.x/features/data-insight/index.md +++ b/openmetadata-docs/content/v1.2.x/features/data-insight/index.md @@ -65,7 +65,7 @@ This chart shows the top 10 data assets the most viewed in your platform. It off /%} **Page views by data assets** -This chart shows the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +This chart shows the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.2/features/data-insight/views-by-assets.png" diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/data-collaboration/index.md b/openmetadata-docs/content/v1.2.x/how-to-guides/data-collaboration/index.md index d2854648318e..0695fd5dd1fb 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/data-collaboration/index.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/data-collaboration/index.md @@ -10,7 +10,7 @@ OpenMetadata is a catalyst for collaboration that brings data teams together to There are three important aspects of data collaboration in OpenMetadata: - **Conversations Threads:** Collaborate around data assets and tags by asking the right questions and discussing the details right within OpenMetadata. -- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initaite a glossary term approval workflow. +- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initiate a glossary term approval workflow. - **Announcements:** Announce to your entire team about the upcoming events and changes such as deprecation, deletion, or schema changes. 
diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/data-discovery/discover.md b/openmetadata-docs/content/v1.2.x/how-to-guides/data-discovery/discover.md index 41f6d7e9bb02..bcd9a6d07b65 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/data-discovery/discover.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/data-discovery/discover.md @@ -34,7 +34,7 @@ alt="Filter by the Type of Data Asset" caption="Filter by the Type of Data Asset" /%} -Users can navigate to the Explore page for specific type of data assets and use the filter options relevant to that data assset to narrow down the search. +Users can navigate to the Explore page for a specific type of data asset and use the filter options relevant to that data asset to narrow down the search. ## Filter by Asset Owner A team or a user can own the data asset in OpenMetadata. Users can filter data assets by the asset owner. With information on the data asset owners, you can direct your questions to the right person or team. diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/data-governance/glossary/approval.md b/openmetadata-docs/content/v1.2.x/how-to-guides/data-governance/glossary/approval.md index 000129b371ee..19db43b14945 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/data-governance/glossary/approval.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/data-governance/glossary/approval.md @@ -9,7 +9,7 @@ The business glossary plays a vital role in standardizing terminology in an orga Watch the video to learn more about **[Glossary Approval Workflow](https://www.youtube.com/watch?v=PgTcKQtpAks&t=4s)** -{% youtube videoId="PgTcKQtpAks" start="0:00" end="2:51" width="560px" height="315px" /%} +{% youtube videoId="PgTcKQtpAks" start="0:00" end="2:51" width="560px" height="315px" /%} To automate the approval workflow, ensure that your Glossary has **Reviewers** assigned. If you glossary has reviewers assigned, the glossary approval workflow gets triggered when a **New Term** is added. diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/data-culture.md b/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/data-culture.md index 7d1d7520af10..bca82c99bb73 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/data-culture.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/data-culture.md @@ -22,7 +22,7 @@ Data is a shared responsibility of the organization and requires an end-to-end a ### 1. Data Needs Clear Ownership -All important data must be owned. Individuals should not own important data assets. Team ownership is preffered over User ownership. It also pushes the data responsibility to a team instead of an individual user. +All important data must be owned. Individuals should not own important data assets. Team ownership is preferred over User ownership. It also pushes the data responsibility to a team instead of an individual user. ### 2. Measure What Matters @@ -60,7 +60,7 @@ Data without description is hard to use, resulting in the loss of productivity. ### 4. Develop Data Vocabulary -Data vocabulary helps in the consistent understanding of data. In OpenMetdata, using the [Glossary](/how-to-guides/data-governance/glossary) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. +Data vocabulary helps in the consistent understanding of data. 
In OpenMetadata, using the [Glossary](/how-to-guides/data-governance/glossary) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. ### 5 Identify Important Data with Tiers diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/report.md b/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/report.md index 9185abcfe961..8fe4bec4aa07 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/report.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/data-insights/report.md @@ -117,7 +117,7 @@ caption="Most Viewed Data Assets" ### Page Views by Data Assets -It helps to understand the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +It helps to understand the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.2/how-to-guides/insights/pvda.png" diff --git a/openmetadata-docs/content/v1.2.x/how-to-guides/user-guide-data-users/data-ownership.md b/openmetadata-docs/content/v1.2.x/how-to-guides/user-guide-data-users/data-ownership.md index 31347bcd1c31..ab849f1819f6 100644 --- a/openmetadata-docs/content/v1.2.x/how-to-guides/user-guide-data-users/data-ownership.md +++ b/openmetadata-docs/content/v1.2.x/how-to-guides/user-guide-data-users/data-ownership.md @@ -38,13 +38,13 @@ If no owner is selected, and if the Database or Database Schema has a owner, the OpenMetadata supports Owner Propagation and the owner will be propagated based on a top-down hierarchy. The owner of the Database will be auto-propagated as the owner of the Database Schemas and Tables under it. Similarly, the owner of the Database Schema will be auto-propagated as the owner of the Tables under it. -- Owner Propogation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. +- Owner Propagation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. - If a Database or Database Schema has an Owner assigned, and you **delete the owner** from the Database Schema or Tables under it, then the Owner will be auto-assigned in this case based on the existing Owner details at the top hierarchy. - You can also assign a different owner manually. -## Team Ownership is Preffered +## Team Ownership is Preferred OpenMetadata is a data collaboration platform. We highly recommend Team Ownership of data assets, because individual users will only have part of the context about the data asset in question. Assigning team ownership will give access to all the members of a particular team. Only teams of the type ‘**Groups**’ can own data assets. 
diff --git a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md index df9c047f4617..4cef3d0188d3 100644 --- a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md +++ b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/services/connections/dashb ## Properties - **`type`**: Service Type. Refer to *#/definitions/qlikSenseType*. Default: `QlikSense`. -- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for genrating dashboard & chat url. +- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for generating dashboard & chat url. - **`hostPort`** *(string)*: URL for the superset instance. - **`certificates`** - **`userDirectory`** *(string)*: User Directory. diff --git a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md similarity index 97% rename from openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md rename to openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md index bec45e0d0625..5062b8782032 100644 --- a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md +++ b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md @@ -1,5 +1,5 @@ --- -title: adlsConection +title: adlsConnection slug: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection --- diff --git a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md index e1d53fdc5cfc..2e8c93fc0717 100644 --- a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md +++ b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md @@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/tests/table/tablecolumntomatchset ## Properties - **`columnNames`** *(string)*: Expected columns of the table to match the ones in {columnValuesSet}. -- **`ordered`** *(boolean)*: Wether or not to considered the order of the list when performing the match. Default: `False`. +- **`ordered`** *(boolean)*: Whether or not to considered the order of the list when performing the match. Default: `False`. Documentation file automatically generated at 2022-07-14 10:51:34.749986. 
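To make the `columnNames` and `ordered` properties above concrete, here is a minimal, hypothetical YAML sketch of a test case that uses this definition, following the same `testDefinitionName`/`parameterValues` pattern as the other test examples in these docs. The column names and values are placeholders, and the exact value formats should be confirmed against the data quality tests reference page.

```yaml
# Illustrative sketch only: expect the table to expose exactly these columns, in this order.
testDefinitionName: tableColumnToMatchSet
parameterValues:
  - name: columnNames        # expected column names (placeholder values)
    value: "user_id, email, created_at"
  - name: ordered            # maps to the boolean `ordered` property above; defaults to False
    value: true
```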
diff --git a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/type/function.md b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/type/function.md index c39dd96b55cd..bb41a3761561 100644 --- a/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/type/function.md +++ b/openmetadata-docs/content/v1.2.x/main-concepts/metadata-standard/schemas/type/function.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/type/function - **`name`** *(string)*: Name of the function. - **`input`** *(string)*: Description of input taken by the function. -- **`description`** *(string)*: Description fo the function. +- **`description`** *(string)*: Description for the function. - **`examples`** *(array)*: Examples of the function to help users author conditions. - **`parameterInputType`**: List of receivers to send mail to. Refer to *#/definitions/parameterType*. - **`paramAdditionalContext`**: Refer to *#/definitions/paramAdditionalContext*. diff --git a/openmetadata-docs/content/v1.2.x/menu.md b/openmetadata-docs/content/v1.2.x/menu.md index d0793030d076..7b85473bd67c 100644 --- a/openmetadata-docs/content/v1.2.x/menu.md +++ b/openmetadata-docs/content/v1.2.x/menu.md @@ -1402,7 +1402,7 @@ site_menu: url: /main-concepts/metadata-standard/schemas/entity/services/connections/search/opensearchconnection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / ServiceConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/serviceconnection - - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConection + - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / CustomStorageConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/customstorageconnection diff --git a/openmetadata-docs/content/v1.2.x/quick-start/local-docker-deployment.md b/openmetadata-docs/content/v1.2.x/quick-start/local-docker-deployment.md index b25a01a8f790..6304d08f9b50 100644 --- a/openmetadata-docs/content/v1.2.x/quick-start/local-docker-deployment.md +++ b/openmetadata-docs/content/v1.2.x/quick-start/local-docker-deployment.md @@ -95,7 +95,7 @@ Follow the instructions [here](https://docs.docker.com/compose/cli-command/#inst - Install [Docker for Windows](https://www.docker.com/products/docker-desktop) - Once installed, please follow the steps [here](https://docs.docker.com/desktop/windows/wsl/) and complete all the pre-requisites for a seamless installation and deployment. - After completion of the pre-requisites, please install `python3-pip` and `python3-venv` on your Ubuntu system. - - Command: `apt install python3-pip python3-venv` (Ensure that you have the priviledge to install packages, if not, please use Super User.) + - Command: `apt install python3-pip python3-venv` (Ensure that you have the privilege to install packages, if not, please use Super User.) 
## Procedure diff --git a/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/data_insight_mixin.md b/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/data_insight_mixin.md index 5a3f94306796..9a2fb5d4f140 100644 --- a/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/data_insight_mixin.md +++ b/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/data_insight_mixin.md @@ -160,7 +160,7 @@ Deletes web analytics events before a timestamp get_aggregated_data_insight_results( start_ts: 'int', end_ts: 'int', - data_insight_chart_nane: 'str', + data_insight_chart_name: 'str', data_report_index: 'str', params: 'Optional[dict]' = None ) → DataInsightChartResult @@ -174,7 +174,7 @@ _summary_ - `start_ts` (int): _description_ - `end_ts` (int): _description_ - - `data_insight_chart_nane` (str): _description_ + - `data_insight_chart_name` (str): _description_ - `data_report_index` (str): _description_ - `params` (Optional[dict], optional): _description_. Defaults to None. diff --git a/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/tests_mixin.md b/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/tests_mixin.md index 824a4ff7a4b4..a21fd26a7995 100644 --- a/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/tests_mixin.md +++ b/openmetadata-docs/content/v1.2.x/sdk/python/api-reference/tests_mixin.md @@ -203,7 +203,7 @@ Get or create a test definition - `test_definition_description` (Optional[str], optional): description for the test definition. Defaults to None. - `entity_type` (Optional[EntityType], optional): entity type (COLUMN or TABLE). Defaults to None. - `test_platforms` (Optional[List[TestPlatform]], optional): test platforms. Defaults to None. - - `test_case_parameter_definition` (Optional[List[TestCaseParameterDefinition]], optional): parameters for the test case defintion. Defaults to None. + - `test_case_parameter_definition` (Optional[List[TestCaseParameterDefinition]], optional): parameters for the test case definition. Defaults to None. diff --git a/openmetadata-docs/content/v1.2.x/sdk/python/ingestion/lineage.md b/openmetadata-docs/content/v1.2.x/sdk/python/ingestion/lineage.md index c7fb60e7788c..9a67e402d6de 100644 --- a/openmetadata-docs/content/v1.2.x/sdk/python/ingestion/lineage.md +++ b/openmetadata-docs/content/v1.2.x/sdk/python/ingestion/lineage.md @@ -440,7 +440,7 @@ workflowConfig: authProvider: ``` -- **serviceName**: Name of the database service which contains tha table involved in query. +- **serviceName**: Name of the database service which contains the table involved in query. - **query**: You can specify the raw sql query within the yaml file itself. - **filePath**: In case the query is too big then you can also save query in a file and pass the path to the file in this field. - **parseTimeout**: Timeout for the lineage parsing process. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md index eb6064942949..1ec4720ec8a1 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/domo-dashboard/yaml.md @@ -30,7 +30,7 @@ Configure and schedule DomoDashboard metadata and profiler workflows from the Op To deploy OpenMetadata, check the Deployment guides. {%/inlineCallout%} -**Note:** For metadata ingestion, kindly make sure add alteast `dashboard` scopes to the clientId provided. 
+**Note:** For metadata ingestion, kindly make sure add atleast `dashboard` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/index.md index 4043e48b65c2..255687f33aa5 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/index.md @@ -29,7 +29,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ## Metadata Ingestion diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/yaml.md index 054160618bea..5c9b6e085511 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/superset/yaml.md @@ -36,7 +36,7 @@ The ingestion also works with Superset 2.0.0 🎉 **API Connection**: To extract metadata from Superset via API, user must have at least `can read on Chart` & `can read on Dashboard` permissions. -**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` priviledge on `dashboards` & `slices` tables within superset schema. +**Database Connection**: To extract metadata from Superset via MySQL or Postgres database, database user must have at least `SELECT` privilege on `dashboards` & `slices` tables within superset schema. ### Python Requirements diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/tableau/yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/tableau/yaml.md index 484902da11ca..5b7327e08b90 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/tableau/yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/dashboard/tableau/yaml.md @@ -26,7 +26,7 @@ Configure and schedule Tableau metadata and profiler workflows from the OpenMeta ## Requirements -To ingest tableau metadata, minimum `Site Role: Viewer` is requried for the tableau user. +To ingest tableau metadata, minimum `Site Role: Viewer` is required for the tableau user. {%inlineCallout icon="description" bold="OpenMetadata 0.12 or later" href="/deployment"%} To deploy OpenMetadata, check the Deployment guides. 
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/bigquery/roles.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/bigquery/roles.md index 65cce6353541..f4fb726d2419 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/bigquery/roles.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/bigquery/roles.md @@ -51,10 +51,12 @@ You can search for the required permissions in the filter box and add them accor | 5 | resourcemanager.projects.get | Metadata Ingestion | | 6 | bigquery.jobs.create | Metadata Ingestion | | 7 | bigquery.jobs.listAll | Metadata Ingestion | -| 8 | datacatalog.taxonomies.get | Fetch Policy Tags | -| 9 | datacatalog.taxonomies.list | Fetch Policy Tags | -| 10 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | -| 11 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | +| 8 | bigquery.routines.get | Stored Procedure | +| 9 | bigquery.routines.list | Stored Procedure | +| 10 | datacatalog.taxonomies.get | Fetch Policy Tags | +| 11 | datacatalog.taxonomies.list | Fetch Policy Tags | +| 12 | bigquery.readsessions.create | Bigquery Usage & Lineage Workflow | +| 13 | bigquery.readsessions.getData | Bigquery Usage & Lineage Workflow | {% image src="/images/v1.3/connectors/bigquery/create-role-4.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/domo-database/yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/domo-database/yaml.md index a4710c9d6ecc..cb8e148d3206 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/domo-database/yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/domo-database/yaml.md @@ -47,7 +47,7 @@ To deploy OpenMetadata, check the Deployment guides. **Note:** -For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided. +For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). @@ -63,7 +63,7 @@ pip3 install "openmetadata-ingestion[domo]" All connectors are defined as JSON Schemas. [Here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/athenaConnection.json) -you can find the structure to create a connection to DomoDatbase. +you can find the structure to create a connection to DomoDatabase. In order to create and run a Metadata Ingestion workflow, we will follow the steps to create a YAML configuration able to connect to the source, diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/mysql/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/mysql/index.md index 041313d95444..dd35f13be840 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/mysql/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/mysql/index.md @@ -44,7 +44,7 @@ Configure and schedule MySQL metadata and profiler workflows from the OpenMetada Note that We support MySQL (version 8.0.0 or greater) and the user should have access to the `INFORMATION_SCHEMA` table. By default a user can see only the rows in the `INFORMATION_SCHEMA` that correspond to objects for which the user has the proper access privileges. ```SQL --- Create user. If is ommited, defaults to '%' +-- Create user. 
If the host is omitted, it defaults to '%' -- More details https://dev.mysql.com/doc/refman/8.0/en/create-user.html CREATE USER ''[@''] IDENTIFIED BY ''; diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md index 1a2742247a89..063fc8062708 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/redshift/troubleshooting.md @@ -13,7 +13,7 @@ connection to server at \":\" (@IP), does not match host name \":\" ``` -If you get this error that time plese pass `{'sslmode': 'verify-ca'}` in the connection arguments. +If you get this error, please pass `{'sslmode': 'verify-ca'}` in the connection arguments. {% image src="/images/v1.3/connectors/redshift/service-connection-arguments.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/sap-hana/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/sap-hana/index.md index 791e1da1c16b..60ed7a495ff2 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/sap-hana/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/database/sap-hana/index.md @@ -94,7 +94,7 @@ We support two possible connection types: - **database**: Optional parameter to connect to a specific database. - **databaseSchema**: databaseSchema of the data source. This is an optional parameter, if you would like to restrict the metadata reading to a single schema. When left blank, OpenMetadata Ingestion attempts to scan all the schemas. -**HDB USer Store** +**HDB User Store** - **User Key**: HDB Store User Key generated from the command `hdbuserstore SET `. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/deployment/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/deployment/index.md index 0b38fed1fed8..43deca8a3ba9 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/deployment/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/deployment/index.md @@ -70,7 +70,7 @@ information received in the shape of an `IngestionPipeline` Entity, and the spec After creating a new workflow from the UI or when editing it, there are two calls happening: - `POST` or `PUT` call to update the `Ingestion Pipeline Entity`, -`/deploy` HTTP call to the `IngestionPipelienResource` to trigger the deployment of the new or updated DAG in the Orchestrator. +`/deploy` HTTP call to the `IngestionPipelineResource` to trigger the deployment of the new or updated DAG in the Orchestrator. {% image src="/images/v1.3/features/ingestion/ingestion-pipeline/ingestion-pipeline-software-system.drawio.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/index.md index 89dd9659636c..b5a07eb9b704 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/index.md @@ -48,7 +48,7 @@ Test Definitions are generic tests definition elements specific to a test such a Test Cases specify a Test Definition. It will define what condition a test must meet to be successful (e.g. `max=n`, etc.). 
One Test Definition can be linked to multiple Test Cases. ## Adding Test Cases to an Entity -Tests cases are actual test that will be ran and executed against your entity. This is where you will define the excution time and logic of these tests +Test cases are the actual tests that will be run and executed against your entity. This is where you will define the execution time and logic of these tests. **Note:** you will need to make sure you have the right permission in OpenMetadata to create a test. ## Step 1: Creating a Test Case @@ -157,7 +157,7 @@ processor: ``` The processor type should be set to ` "orm-test-runner"`. For accepted test definition names and parameter value names refer to the [tests page](/connectors/ingestion/workflows/data-quality/tests). -### Key referece: +### Key reference: - `forceUpdate`: if the test case exists (base on the test case name) for the entity, implements the strategy to follow when running the test (i.e. whether or not to update parameters) - `testCases`: list of test cases to execute against the entity referenced - `name`: test case name @@ -184,7 +184,7 @@ processor: config: forceUpdate: false testCases: - - name: column_value_lenght_tagFQN + - name: column_value_length_tagFQN testDefinitionName: columnValueLengthsToBeBetween columnName: tagFQN parameterValues: @@ -304,7 +304,7 @@ From there you can select a Test Suite and visualize the results associated with ### From a Table Entity Navigate to your table and click on the `profiler & Data Quality` tab. From there you'll be able to see test results at the table or column level. #### Table Level Test Results -In the top pannel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level +In the top panel, click on the white background `Data Quality` button. This will bring you to a summary of all your quality tests at the table level {% image src="/images/v1.3/features/ingestion/workflows/data-quality/table-results-entity.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/tests.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/tests.md index 0666ecd5d9fb..69046df96cad 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/tests.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/data-quality/tests.md @@ -237,7 +237,7 @@ Validate a list of table column name matches an expected set of columns | ----------- | ----------- | |[`ordered=False`] `columnNames` **matches** the list of column names in the table **regardless of the order**|Success ✅| |[`ordered=True`] `columnNames` **matches** the list of column names in the table **in the corresponding order** (e.g. `["a","b"] == ["a","b"]`| Success ✅| -|[`ordered=fALSE`] `columnNames` **does no match** the list of column names in the table **regardless of the order**|Failed ❌| +|[`ordered=False`] `columnNames` **does not match** the list of column names in the table **regardless of the order**|Failed ❌| |[`ordered=True`] `columnNames` **does no match** the list of column names in the table **and/or the corresponding order** (e.g. `["a","b"] != ["b","a"]`|Failed ❌| **YAML Config** @@ -332,7 +332,7 @@ Validate the number of rows inserted for the defined period is between the expec {% note %} -The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. 
The logic of the test performed will be similar to executiong a Table Row Count to be Between test against a table with a partition configured. +The Table Row Inserted Count To Be Between cannot be executed against tables that have configured a partition in OpenMetadata. The logic of the test performed will be similar to executing a Table Row Count to be Between test against a table with a partition configured. {% /note %} @@ -499,7 +499,7 @@ This test allows us to specify how many values in a column we expect that will m - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression @@ -546,7 +546,7 @@ This test allows us to specify values in a column we expect that will not match - mariaDB - sqlite - clickhouse -- snowfalke +- snowflake The other databases will fall back to the `LIKE` expression diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md index ee593566deb8..efec2afa0ca5 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-lineage.md @@ -7,7 +7,7 @@ slug: /connectors/ingestion/workflows/dbt/ingest-dbt-lineage Ingest the lineage information from dbt `manifest.json` file into OpenMetadata. -OpenMetadata exctracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file. +OpenMetadata extracts the lineage information from the `depends_on` and `compiled_query/compiled_code` keys from the manifest file. ### 1. Lineage information from dbt "depends_on" key Openmetadata fetches the lineage information from the `manifest.json` file. Below is a sample `manifest.json` file node containing lineage information under `node_name->depends_on->nodes`. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md index 6bb249d48695..79bfebc6d822 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-owner.md @@ -119,7 +119,7 @@ If the owner's name in `manifest.json` or `catalog.json` file is `openmetadata`, ## Linking the Owner to the table -After runing the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. +After running the ingestion workflow with dbt you can see the created user or team getting linked to the table as it's owner as it was specified in the `manifest.json` or `catalog.json` file. 
{% image src="/images/v1.3/features/ingestion/workflows/dbt/ingest_dbt_owner/linked-user.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md index 4399f4b388ce..b9bd1ffc889d 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/dbt/ingest-dbt-yaml.md @@ -24,7 +24,7 @@ We can create a workflow that will obtain the dbt information from the dbt files ### 1. Create the workflow configuration Configure the dbt.yaml file according keeping only one of the required source (local, http, gcp, s3). -The dbt files should be present on the source mentioned and should have the necssary permissions to be able to access the files. +The dbt files should be present on the source mentioned and should have the necessary permissions to be able to access the files. Enter the name of your database service from OpenMetadata in the `serviceName` key in the yaml diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/external_workflow.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/external_workflow.md index 1a858eb85ef8..cb48a7c091ce 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/external_workflow.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/external_workflow.md @@ -107,7 +107,7 @@ processor: # storageConfig: # awsRegion: us-east-2 # awsAccessKeyId: - # awsSecretAccessKey: + # awsSecretAccessKey: # awsSessionToken: # assumeRoleArn: # assumeRoleSessionName: @@ -125,7 +125,7 @@ processor: # storageConfig: # awsRegion: us-east-2 # awsAccessKeyId: - # awsSecretAccessKey: + # awsSecretAccessKey: # awsSessionToken: # assumeRoleArn: # assumeRoleSessionName: diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/index.md index abaa097aef89..ce5433a60395 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/ingestion/workflows/profiler/index.md @@ -67,7 +67,7 @@ Set the sample to be use by the profiler for the specific table. - `Percentage`: Value must be between 0 and 100 exclusive (0 < percentage < 100). This will sample the table based on a percentage - `Row Count`: The table will be sampled based on a number of rows (i.e. `1,000`, `2,000`), etc. -⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitionning or sample query option if you need to limit the amount of data scanned. +⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitioning or sample query option if you need to limit the amount of data scanned. **Auto PII Tagging (Optional)** Configuration to automatically tag columns that might contain sensitive information. 
@@ -109,7 +109,7 @@ Set the sample to be use by the profiler for the specific table. - `Percentage`: Value must be between 0 and 100 exclusive (0 < percentage < 100). This will sample the table based on a percentage - `Row Count`: The table will be sampled based on a number of rows (i.e. `1,000`, `2,000`), etc. -⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitionning or sample query option if you need to limit the amount of data scanned. +⚠️ This option is currently not support for Druid. Sampling leverage `RANDOM` functions in most database (some have specific sampling functions) and Druid provides neither of these option. We recommend using the partitioning or sample query option if you need to limit the amount of data scanned. **Profile Sample Query** Use a query to sample data for the profiler. This will overwrite any profle sample set. @@ -139,7 +139,7 @@ Once you have picked the `Interval Type` you will need to define the configurati - `YEAR` `COLUMN-VALUE` -- `Value`: a list of value to use for the partitionning logic +- `Value`: a list of value to use for the partitioning logic `INTEGER-RANGE` - `Start Range`: the start of the range (inclusive) @@ -378,7 +378,7 @@ Profiling all the tables in your data platform might not be the most optimized a When setting up a profiler workflow, you have the possibility to filter out/in certain databases, schemas, or tables. Using this feature will greatly help you narrow down which table you want to profile. -### 2. Sampling and Partitionning your Tables +### 2. Sampling and Partitioning your Tables On a table asset, you have the possibility to add a sample percentage/rows and a partitioning logic. Doing so will significantly reduce the amount of data scanned and the computing power required to perform the different operations. For sampling, you can set a sampling percentage at the workflow level. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md index 2e47dd917cd1..b8912f1fe5b3 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/connectors/pipeline/domo-pipeline/yaml.md @@ -22,7 +22,7 @@ To deploy OpenMetadata, check the Deployment guides. -**Note:** For metadata ingestion, kindly make sure add alteast `data` scopes to the clientId provided. +**Note:** For metadata ingestion, kindly make sure add atleast `data` scopes to the clientId provided. Question related to scopes, click [here](https://developer.domo.com/portal/1845fc11bbe5d-api-authentication). ### Python Requirements diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/kubernetes/gke.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/kubernetes/gke.md index a45845045ae9..b07b60db1b4c 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/kubernetes/gke.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/kubernetes/gke.md @@ -194,7 +194,7 @@ kubectl create -f nfs-server-deployment.yml kubectl create -f nfs-cluster-ip-service.yml ``` -We create a CluserIP Service for pods to access NFS within the cluster at a fixed IP/DNS. +We create a ClusterIP Service for pods to access NFS within the cluster at a fixed IP/DNS. 
### Provision NFS backed PV and PVC for Airflow DAGs and Airflow Logs diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/keycloak/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/keycloak/index.md index 3bc9ba7879e2..f8f7818b3cf0 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/keycloak/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/keycloak/index.md @@ -43,7 +43,7 @@ Security requirements for your **production** environment: {% image src="/images/v1.3/deployment/security/keycloak/3-add-client.png" alt="add-client" /%} ### Step 4: Edit settings of the client -- Change "Acess Type" value from "public" to "confidential". +- Change "Access Type" value from "public" to "confidential". - Change "implicit flow" and "service accounts" to enabled. {% image src="/images/v1.3/deployment/security/keycloak/4-edit-settings-client.png" alt="edit-settings-client" /%} diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/saml/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/saml/index.md index 1de18b13d37f..a2929fe9f840 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/saml/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/deployment/security/saml/index.md @@ -75,7 +75,7 @@ Every IDP provides this information, we can download the XML Metadata and config 1. EntityId/Authority -> Normally a Url providing info about the provider. 2. SignOn Url -> Url to be used for signing purpose. -3. X509 Certificate -> In case the SP expects a signed reponse from IDP, the IDP can be configured with Signing Certificate given by SP. +3. X509 Certificate -> In case the SP expects a signed response from IDP, the IDP can be configured with Signing Certificate given by SP. 4. Private Key -> In case SP expects a encrypted response from the IDP , the IDP can be configured with SPs public key for encryption and the Private Key can be used for SP for decrypting. SP Metadata XML is available at "http://localhost:8585/api/v1/saml/metadata", `localhost` needs to be updated with the correct URI. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/developers/architecture/code-layout.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/developers/architecture/code-layout.md index c5612a0a7e15..9e23fa0a5d44 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/developers/architecture/code-layout.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/developers/architecture/code-layout.md @@ -4,7 +4,7 @@ slug: /developers/architecture/code-layout --- # Understand Code Layout -Use this document as a quick start guide to begin developing in OpenMetdata. Below, we address the following topics: +Use this document as a quick start guide to begin developing in OpenMetadata. Below, we address the following topics: 1. Schema (Metadata Models) 2. APIs diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/alerts-notifications/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/alerts-notifications/index.md index fbc5ed1bddc7..0196f31a4383 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/alerts-notifications/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/alerts-notifications/index.md @@ -38,7 +38,7 @@ For slack configuration you will need to get the endpoint URL of the channel whe - **Secret Key**: Secret key can be used to secure the webhook connection. 
### MS Teams -For MS Teams configuration you will need to get the endpoint URL if the chanel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: +For MS Teams configuration you will need to get the endpoint URL if the channel where you wish to send the alerts. You can find this by going to the Teams channel where you want the posts to appear, clicking the three dots `...`, and clicking "Connectors". Then add the "Incoming Webhook" connector. Copy this connector's URL and supply it here to OpenMetadata. It may be in the form of `https://your-domain.webhook.office.com/webhookb2/...@.../IncomingWebhook/.../...`. For more on MS Teams webhooks, see [Create an Incoming Webhook](https://learn.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook). Additionally, you can configure the following parameter: - **Batch Size**: size of the batch that will be sent to the endpoint. - **Connection Timeout**: timeout for the connection. - **Secret Key**: Secret key can be used to secure the webhook connection. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/data-insight/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/data-insight/index.md index fb777b35c07d..371a8f13d916 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/data-insight/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/features/data-insight/index.md @@ -65,7 +65,7 @@ This chart shows the top 10 data assets the most viewed in your platform. It off /%} **Page views by data assets** -This chart shows the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +This chart shows the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.3/features/data-insight/views-by-assets.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-collaboration/index.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-collaboration/index.md index d2854648318e..0695fd5dd1fb 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-collaboration/index.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-collaboration/index.md @@ -10,7 +10,7 @@ OpenMetadata is a catalyst for collaboration that brings data teams together to There are three important aspects of data collaboration in OpenMetadata: - **Conversations Threads:** Collaborate around data assets and tags by asking the right questions and discussing the details right within OpenMetadata. -- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initaite a glossary term approval workflow. +- **Tasks:** Create tasks around data assets to create and update descriptions, request for tags, and initiate a glossary term approval workflow. 
- **Announcements:** Announce to your entire team about the upcoming events and changes such as deprecation, deletion, or schema changes.
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-discovery/discover.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-discovery/discover.md
index df7159771469..facf46d12a45 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-discovery/discover.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-discovery/discover.md
@@ -34,7 +34,7 @@ alt="Filter by the Type of Data Asset"
caption="Filter by the Type of Data Asset"
/%}
-Users can navigate to the Explore page for specific type of data assets and use the filter options relevant to that data assset to narrow down the search.
+Users can navigate to the Explore page for specific type of data assets and use the filter options relevant to that data asset to narrow down the search.
## Filter by Asset Owner
A team or a user can own the data asset in OpenMetadata. Users can filter data assets by the asset owner. With information on the data asset owners, you can direct your questions to the right person or team.
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-governance/glossary/approval.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-governance/glossary/approval.md
index 000129b371ee..19db43b14945 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-governance/glossary/approval.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-governance/glossary/approval.md
@@ -9,7 +9,7 @@ The business glossary plays a vital role in standardizing terminology in an orga
Watch the video to learn more about **[Glossary Approval Workflow](https://www.youtube.com/watch?v=PgTcKQtpAks&t=4s)**
-{% youtube videoId="PgTcKQtpAks" start="0:00" end="2:51" width="560px" height="315px" /%}
+{% youtube videoId="PgTcKQtpAks" start="0:00" end="2:51" width="560px" height="315px" /%}
To automate the approval workflow, ensure that your Glossary has **Reviewers** assigned. If you glossary has reviewers assigned, the glossary approval workflow gets triggered when a **New Term** is added.
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/data-culture.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/data-culture.md
index 7d1d7520af10..bca82c99bb73 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/data-culture.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/data-culture.md
@@ -22,7 +22,7 @@ Data is a shared responsibility of the organization and requires an end-to-end a
### 1. Data Needs Clear Ownership
-All important data must be owned. Individuals should not own important data assets. Team ownership is preffered over User ownership. It also pushes the data responsibility to a team instead of an individual user.
+All important data must be owned. Individuals should not own important data assets. Team ownership is preferred over User ownership. It also pushes the data responsibility to a team instead of an individual user.
### 2. Measure What Matters
@@ -60,7 +60,7 @@ Data without description is hard to use, resulting in the loss of productivity.
### 4. Develop Data Vocabulary
-Data vocabulary helps in the consistent understanding of data. 
In OpenMetdata, using the [Glossary](/how-to-guides/data-governance/glossary) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. +Data vocabulary helps in the consistent understanding of data. In OpenMetadata, using the [Glossary](/how-to-guides/data-governance/glossary) feature, you can describe business terms and concepts in a single place. Also, the data assets can be labelled using these glossary terms in order to provide semantic meaning. ### 5 Identify Important Data with Tiers diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/report.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/report.md index 7835ed0a4b65..dfb61897385f 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/report.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/data-insights/report.md @@ -117,7 +117,7 @@ caption="Most Viewed Data Assets" ### Page Views by Data Assets -It helps to understand the total number of page views by asset type. This allows you to understand which asset familly drives the most interest in your organization +It helps to understand the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image src="/images/v1.3/how-to-guides/insights/pvda.png" diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/user-guide-data-users/data-ownership.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/user-guide-data-users/data-ownership.md index 5e3f711e4f07..53b8fdcb2855 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/user-guide-data-users/data-ownership.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/how-to-guides/user-guide-data-users/data-ownership.md @@ -38,13 +38,13 @@ If no owner is selected, and if the Database or Database Schema has a owner, the OpenMetadata supports Owner Propagation and the owner will be propagated based on a top-down hierarchy. The owner of the Database will be auto-propagated as the owner of the Database Schemas and Tables under it. Similarly, the owner of the Database Schema will be auto-propagated as the owner of the Tables under it. -- Owner Propogation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. +- Owner Propagation does not work for data assets that already have an Owner assigned to them. If there is **no owner**, then an Owner will be assigned based on the hierarchy. - If a Database or Database Schema has an Owner assigned, and you **delete the owner** from the Database Schema or Tables under it, then the Owner will be auto-assigned in this case based on the existing Owner details at the top hierarchy. - You can also assign a different owner manually. -## Team Ownership is Preffered +## Team Ownership is Preferred OpenMetadata is a data collaboration platform. We highly recommend Team Ownership of data assets, because individual users will only have part of the context about the data asset in question. Assigning team ownership will give access to all the members of a particular team. Only teams of the type ‘**Groups**’ can own data assets. 
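The owner-propagation rule described in the hunk above is easy to misread, so here is a minimal sketch of the documented behaviour: ownership flows top-down (Database → Database Schema → Table) and is applied only where no owner is already set. This is an illustration only, not OpenMetadata's implementation; the function and owner names are hypothetical.

```python
# Illustrative pseudologic for the documented owner-propagation rule:
# inherit the parent's owner only when the asset has no owner of its own.
from typing import Optional


def resolve_owner(existing_owner: Optional[str], parent_owner: Optional[str]) -> Optional[str]:
    """Keep the asset's own owner if present; otherwise inherit from the parent."""
    return existing_owner if existing_owner is not None else parent_owner


print(resolve_owner(None, "team:Data-Platform"))              # inherits -> 'team:Data-Platform'
print(resolve_owner("team:Marketing", "team:Data-Platform"))  # keeps    -> 'team:Marketing'
```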
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md
index df9c047f4617..4cef3d0188d3 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/dashboard/qlikSenseConnection.md
@@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/entity/services/connections/dashb
## Properties
- **`type`**: Service Type. Refer to *#/definitions/qlikSenseType*. Default: `QlikSense`.
-- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for genrating dashboard & chat url.
+- **`displayUrl`** *(string)*: Qlik Sense Base URL, used for generating dashboard & chat url.
- **`hostPort`** *(string)*: URL for the superset instance.
- **`certificates`**
- **`userDirectory`** *(string)*: User Directory.
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md
similarity index 97%
rename from openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md
rename to openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md
index bec45e0d0625..5062b8782032 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConection.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsConnection.md
@@ -1,5 +1,5 @@
---
-title: adlsConection
+title: adlsConnection
slug: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection
---
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md
index e1d53fdc5cfc..2e8c93fc0717 100644
--- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md
+++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/tests/table/tableColumnToMatchSet.md
@@ -10,7 +10,7 @@ slug: /main-concepts/metadata-standard/schemas/tests/table/tablecolumntomatchset
## Properties
- **`columnNames`** *(string)*: Expected columns of the table to match the ones in {columnValuesSet}.
-- **`ordered`** *(boolean)*: Wether or not to considered the order of the list when performing the match. Default: `False`.
+- **`ordered`** *(boolean)*: Whether or not to consider the order of the list when performing the match. Default: `False`.
Documentation file automatically generated at 2022-07-14 10:51:34.749986.
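The `ordered` flag documented in the `tableColumnToMatchSet` hunk above is easier to grasp with a small example. The sketch below only illustrates the described semantics (match the expected column list, optionally respecting order, default `False`); the function name is hypothetical and this is not the test implementation shipped with OpenMetadata.

```python
# Illustrative only: ordered vs. unordered column matching as described
# for tableColumnToMatchSet. Not the OpenMetadata implementation.
from typing import List


def columns_match(actual: List[str], expected: List[str], ordered: bool = False) -> bool:
    """Return True if the table's columns match the expected list.

    With ordered=True the column order must match exactly; with the default
    ordered=False (mirroring Default: `False`) only membership and counts matter.
    """
    if ordered:
        return actual == expected
    return sorted(actual) == sorted(expected)


# Example: the order differs, so only the unordered check passes.
print(columns_match(["id", "name", "email"], ["name", "id", "email"]))                 # True
print(columns_match(["id", "name", "email"], ["name", "id", "email"], ordered=True))   # False
```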
diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/type/function.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/type/function.md index c39dd96b55cd..bb41a3761561 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/type/function.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/main-concepts/metadata-standard/schemas/type/function.md @@ -11,7 +11,7 @@ slug: /main-concepts/metadata-standard/schemas/type/function - **`name`** *(string)*: Name of the function. - **`input`** *(string)*: Description of input taken by the function. -- **`description`** *(string)*: Description fo the function. +- **`description`** *(string)*: Description for the function. - **`examples`** *(array)*: Examples of the function to help users author conditions. - **`parameterInputType`**: List of receivers to send mail to. Refer to *#/definitions/parameterType*. - **`paramAdditionalContext`**: Refer to *#/definitions/paramAdditionalContext*. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/menu.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/menu.md index 17408196149b..b615b75747a4 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/menu.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/menu.md @@ -1402,7 +1402,7 @@ site_menu: url: /main-concepts/metadata-standard/schemas/entity/services/connections/search/opensearchconnection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / ServiceConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/serviceconnection - - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConection + - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / AdlsConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/adlsconection - category: Main Concepts / Metadata Standard / Schemas / Entity / Services / Connections / Storage / CustomStorageConnection url: /main-concepts/metadata-standard/schemas/entity/services/connections/storage/customstorageconnection diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/quick-start/local-docker-deployment.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/quick-start/local-docker-deployment.md index 15edd0ebf50d..373564d5c4d0 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/quick-start/local-docker-deployment.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/quick-start/local-docker-deployment.md @@ -95,7 +95,7 @@ Follow the instructions [here](https://docs.docker.com/compose/cli-command/#inst - Install [Docker for Windows](https://www.docker.com/products/docker-desktop) - Once installed, please follow the steps [here](https://docs.docker.com/desktop/windows/wsl/) and complete all the pre-requisites for a seamless installation and deployment. - After completion of the pre-requisites, please install `python3-pip` and `python3-venv` on your Ubuntu system. - - Command: `apt install python3-pip python3-venv` (Ensure that you have the priviledge to install packages, if not, please use Super User.) + - Command: `apt install python3-pip python3-venv` (Ensure that you have the privilege to install packages, if not, please use Super User.) 
## Procedure diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/data_insight_mixin.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/data_insight_mixin.md index 5a3f94306796..9a2fb5d4f140 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/data_insight_mixin.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/data_insight_mixin.md @@ -160,7 +160,7 @@ Deletes web analytics events before a timestamp get_aggregated_data_insight_results( start_ts: 'int', end_ts: 'int', - data_insight_chart_nane: 'str', + data_insight_chart_name: 'str', data_report_index: 'str', params: 'Optional[dict]' = None ) → DataInsightChartResult @@ -174,7 +174,7 @@ _summary_ - `start_ts` (int): _description_ - `end_ts` (int): _description_ - - `data_insight_chart_nane` (str): _description_ + - `data_insight_chart_name` (str): _description_ - `data_report_index` (str): _description_ - `params` (Optional[dict], optional): _description_. Defaults to None. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/tests_mixin.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/tests_mixin.md index 824a4ff7a4b4..a21fd26a7995 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/tests_mixin.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/api-reference/tests_mixin.md @@ -203,7 +203,7 @@ Get or create a test definition - `test_definition_description` (Optional[str], optional): description for the test definition. Defaults to None. - `entity_type` (Optional[EntityType], optional): entity type (COLUMN or TABLE). Defaults to None. - `test_platforms` (Optional[List[TestPlatform]], optional): test platforms. Defaults to None. - - `test_case_parameter_definition` (Optional[List[TestCaseParameterDefinition]], optional): parameters for the test case defintion. Defaults to None. + - `test_case_parameter_definition` (Optional[List[TestCaseParameterDefinition]], optional): parameters for the test case definition. Defaults to None. diff --git a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/ingestion/lineage.md b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/ingestion/lineage.md index d91bbba6dfa9..1406f7eb9ca0 100644 --- a/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/ingestion/lineage.md +++ b/openmetadata-docs/content/v1.3.x-SNAPSHOT/sdk/python/ingestion/lineage.md @@ -440,7 +440,7 @@ workflowConfig: authProvider: ``` -- **serviceName**: Name of the database service which contains tha table involved in query. +- **serviceName**: Name of the database service which contains the table involved in query. - **query**: You can specify the raw sql query within the yaml file itself. - **filePath**: In case the query is too big then you can also save query in a file and pass the path to the file in this field. - **parseTimeout**: Timeout for the lineage parsing process. 
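For reference, a call to the mixin method whose parameter is renamed in the `data_insight_mixin` hunk above (`data_insight_chart_nane` → `data_insight_chart_name`) might look like the sketch below. It assumes the OpenMetadata Python SDK is installed; the connection setup is abbreviated (authentication omitted), and the chart name, report index, and millisecond timestamps are illustrative assumptions rather than canonical values.

```python
from datetime import datetime, timedelta

from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
    OpenMetadataConnection,
)
from metadata.ingestion.ometa.ometa_api import OpenMetadata

# Abbreviated connection; real deployments also need auth configuration.
metadata = OpenMetadata(OpenMetadataConnection(hostPort="http://localhost:8585/api"))

# Assumed to be epoch milliseconds; a one-week window ending now.
end_ts = int(datetime.now().timestamp() * 1000)
start_ts = int((datetime.now() - timedelta(days=7)).timestamp() * 1000)

# Uses the corrected keyword `data_insight_chart_name`; the chart and index
# values here are placeholders, not canonical names.
result = metadata.get_aggregated_data_insight_results(
    start_ts=start_ts,
    end_ts=end_ts,
    data_insight_chart_name="PercentageOfEntitiesWithDescriptionByType",
    data_report_index="entity_report_data_index",
)
print(result)
```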
diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/dashboard/qlikSenseConnection.json b/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/dashboard/qlikSenseConnection.json index e58788ceaca0..87f8a2217c07 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/dashboard/qlikSenseConnection.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/dashboard/qlikSenseConnection.json @@ -96,7 +96,7 @@ "hostPort": { "expose": true, "title": "Qlik Engine JSON API Websocket URL", - "description": "URL for the superset instance.", + "description": "URL for the Qlik instance.", "type": "string", "format": "uri" }, diff --git a/openmetadata-spec/src/main/resources/json/schema/type/function.json b/openmetadata-spec/src/main/resources/json/schema/type/function.json index 1cae2a380422..e14ecc5689d4 100644 --- a/openmetadata-spec/src/main/resources/json/schema/type/function.json +++ b/openmetadata-spec/src/main/resources/json/schema/type/function.json @@ -36,7 +36,7 @@ "type" : "string" }, "description" : { - "description": "Description fo the function.", + "description": "Description for the function.", "type" : "string" }, "examples" : { diff --git a/openmetadata-ui/src/main/resources/ui/src/utils/AuthProvider.util.ts b/openmetadata-ui/src/main/resources/ui/src/utils/AuthProvider.util.ts index d79143bac654..58acfa98a616 100644 --- a/openmetadata-ui/src/main/resources/ui/src/utils/AuthProvider.util.ts +++ b/openmetadata-ui/src/main/resources/ui/src/utils/AuthProvider.util.ts @@ -282,7 +282,7 @@ export const getUrlPathnameExpiryAfterRoute = () => { /** * @exp expiry of token - * @isExpired wether token is already expired or not + * @isExpired Whether token is already expired or not * @diff Difference between token expiry & current time in ms * @timeoutExpiry time in ms for try to silent sign-in * @returns exp, isExpired, diff, timeoutExpiry
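The JSDoc fields touched in the last hunk (`exp`, `isExpired`, `diff`, `timeoutExpiry`) boil down to a small computation. The sketch below mirrors that arithmetic purely as an illustration; the original helper is TypeScript in `AuthProvider.util.ts`, and the renewal buffer used here is an assumption, not the value the UI uses.

```python
# Illustration of the documented fields: given a JWT `exp` claim (epoch seconds),
# derive the expiry status, remaining time in ms, and a silent sign-in timeout.
# The 60-second renewal buffer is an assumption for the example.
import time


def token_expiry_info(exp: int, renewal_buffer_ms: int = 60_000) -> dict:
    now_ms = int(time.time() * 1000)
    diff = exp * 1000 - now_ms                          # remaining time in ms (negative if past)
    is_expired = diff <= 0                              # token already expired or not
    timeout_expiry = max(diff - renewal_buffer_ms, 0)   # ms until a silent sign-in attempt
    return {"exp": exp, "isExpired": is_expired, "diff": diff, "timeoutExpiry": timeout_expiry}


print(token_expiry_info(int(time.time()) + 3600))  # token valid for another hour
```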