diff --git a/ingestion/src/metadata/examples/workflows/datafactory.yaml b/ingestion/src/metadata/examples/workflows/datafactory.yaml
new file mode 100644
index 000000000000..d9e7e7b36b21
--- /dev/null
+++ b/ingestion/src/metadata/examples/workflows/datafactory.yaml
@@ -0,0 +1,29 @@
+source:
+  type: datafactory
+  serviceName: local_datafactory
+  serviceConnection:
+    config:
+      type: DataFactory
+      configSource:
+        clientId: client_id
+        clientSecret: client_secret
+        tenantId: tenant_id
+        accountName: account_name
+      subscription_id: subscription_id
+      resource_group_name: resource_group_name
+      factory_name: factory_name
+      run_filter_days: 7
+  sourceConfig:
+    config:
+      type: PipelineMetadata
+sink:
+  type: metadata-rest
+  config: {}
+workflowConfig:
+  loggerLevel: INFO # DEBUG, INFO, WARN or ERROR
+  openMetadataServerConfig:
+    hostPort: http://localhost:8585/api
+    authProvider: openmetadata
+    securityConfig:
+      jwtToken: "eyJraWQiOiJHYjM4OWEtOWY3Ni1nZGpzLWE5MmotMDI0MmJrOTQzNTYiLCJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJhZG1pbiIsImlzQm90IjpmYWxzZSwiaXNzIjoib3Blbi1tZXRhZGF0YS5vcmciLCJpYXQiOjE2NjM5Mzg0NjIsImVtYWlsIjoiYWRtaW5Ab3Blbm1ldGFkYXRhLm9yZyJ9.tS8um_5DKu7HgzGBzS1VTA5uUjKWOCU0B_j08WXBiEC0mr0zNREkqVfwFDD-d24HlNEbrqioLsBuFRiwIWKc1m_ZlVQbG7P36RUxhuv2vbSp80FKyNM-Tj93FDzq91jsyNmsQhyNv_fNr3TXfzzSPjHt8Go0FMMP66weoKMgW2PbXlhVKwEuXUHyakLLzewm9UMeQaEiRzhiTMU3UkLXcKbYEJJvfNFcLwSl9W8JCO_l0Yj3ud-qt_nQYEZwqW6u5nfdQllN133iikV4fM5QZsMCnm8Rq1mvLR0y9bmJiD7fwM1tmJ791TUWqmKaTnP49U493VanKpUAfzIiOiIbhg"
+
\ No newline at end of file
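Editor's note: a workflow file like the one above can also be run from Python instead of the CLI. This is a minimal sketch assuming `openmetadata-ingestion[datafactory]` is installed; `MetadataWorkflow` and its import path follow the pattern current OpenMetadata ingestion docs use and may differ across versions, and the file path is illustrative.

```python
import yaml

# Runner class used in OpenMetadata's ingestion docs; adjust the import
# if your openmetadata-ingestion version exposes it elsewhere.
from metadata.workflow.metadata import MetadataWorkflow


def run() -> None:
    # Path assumes you run from the repository root; adjust as needed.
    with open("ingestion/src/metadata/examples/workflows/datafactory.yaml") as f:
        workflow_config = yaml.safe_load(f)

    workflow = MetadataWorkflow.create(workflow_config)
    workflow.execute()
    workflow.raise_from_status()
    workflow.print_status()
    workflow.stop()


if __name__ == "__main__":
    run()
```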
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md
index 231027a78a55..38bbc2814a76 100644
--- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md
@@ -349,6 +349,11 @@ site_menu:
       isCollateOnly: true
     - category: Connectors / Pipeline / Matillion / Run Externally
       url: /connectors/pipeline/matillion/yaml
+    - category: Connectors / Pipeline / DataFactory
+      url: /connectors/pipeline/datafactory
+      isCollateOnly: true
+    - category: Connectors / Pipeline / DataFactory / Run Externally
+      url: /connectors/pipeline/datafactory/yaml
     - category: Connectors / Pipeline / Databricks Pipeline
       url: /connectors/pipeline/databricks-pipeline
     - category: Connectors / Pipeline / Databricks Pipeline / Run Externally
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/index.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/index.md
new file mode 100644
index 000000000000..a6ea4436873a
--- /dev/null
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/index.md
@@ -0,0 +1,131 @@
+---
+title: Data Factory
+slug: /connectors/pipeline/datafactory
+collate: true
+---
+
+{% connectorDetailsHeader
+name="DataFactory"
+stage="PROD"
+platform="Collate"
+availableFeatures=["Pipelines", "Pipeline Status", "Lineage"]
+unavailableFeatures=["Owners", "Tags"]
+/ %}
+
+In this section, we provide guides and references to use the Azure Data Factory connector.
+
+Configure and schedule Azure Data Factory metadata workflows from the OpenMetadata UI:
+
+- [Requirements](#requirements)
+  - [Data Factory Versions](#data-factory-versions)
+- [Metadata Ingestion](#metadata-ingestion)
+  - [Service Name](#service-name)
+  - [Connection Details](#connection-details)
+  - [Metadata Ingestion Options](#metadata-ingestion-options)
+- [Troubleshooting](#troubleshooting)
+  - [Workflow Deployment Error](#workflow-deployment-error)
+
+{% partial file="/v1.5/connectors/ingestion-modes-tiles.md" variables={yamlPath: "/connectors/pipeline/datafactory/yaml"} /%}
+
+## Requirements
+
+### Data Factory Versions
+
+The Ingestion framework uses the [Azure Data Factory APIs](https://learn.microsoft.com/en-us/rest/api/datafactory/v2) to connect to Data Factory and fetch metadata.
+
+You can find further information on the Azure Data Factory connector in the [docs](https://docs.open-metadata.org/connectors/pipeline/datafactory).
+
+## Permissions
+
+Ensure that the service principal or managed identity you’re using has the necessary permissions in the Data Factory resource (Reader, Contributor, or Data Factory Contributor role at minimum).
+
+## Metadata Ingestion
+
+{% partial
+  file="/v1.5/connectors/metadata-ingestion-ui.md"
+  variables={
+    connector: "DataFactory",
+    selectServicePath: "/images/v1.6/connectors/datafactory/select-service.png",
+    addNewServicePath: "/images/v1.6/connectors/datafactory/add-new-service.png",
+    serviceConnectionPath: "/images/v1.6/connectors/datafactory/service-connection.png",
+  }
+/%}
+
+{% stepsContainer %}
+{% extraContent parentTagName="stepsContainer" %}
+
+#### Connection Details
+
+- **Subscription ID**: Your Azure subscription’s unique identifier. In the Azure portal, navigate to Subscriptions > Your Subscription > Overview. You’ll see the subscription ID listed there.
+
+- **Resource Group name**: The name of the resource group that contains your Data Factory instance. In the Azure portal, navigate to Resource Groups, find your resource group, and note the name.
+
+- **Azure Data Factory name**: The name of your Data Factory instance. In the Azure portal, navigate to Data Factories and find your Data Factory. The Data Factory name will be listed there.
+
+- **Azure Data Factory pipeline runs day filter**: The number of days back from the current date to look for pipeline runs; runs outside this window are filtered out. Default is `7` days. `Optional`
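Editor's note: before filling in these fields, it can help to confirm the values outside OpenMetadata. A sketch using the official `azure-identity` and `azure-mgmt-datafactory` packages — all identifier values are placeholders mirroring the fields above:

```python
from azure.identity import ClientSecretCredential
from azure.mgmt.datafactory import DataFactoryManagementClient

# Placeholder values mirror the connection fields documented above.
credential = ClientSecretCredential(
    tenant_id="tenant_id",
    client_id="client_id",
    client_secret="client_secret",
)
client = DataFactoryManagementClient(credential, "subscription_id")

# If subscription_id, resource_group_name, and factory_name are correct and
# the principal holds at least the Reader role, this lists the pipelines.
for pipeline in client.pipelines.list_by_factory("resource_group_name", "factory_name"):
    print(pipeline.name)
```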
+
+## Azure Data Factory Configuration
+
+- **Client ID**: To get the Client ID (also known as the application ID), follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the Overview section, copy the `Application (client) ID`.
+
+- **Client Secret**: To get the client secret, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. Under `Manage`, select `Certificates & secrets`.
+5. Under `Client secrets`, select `New client secret`.
+6. In the `Add a client secret` pop-up window, provide a description for your application secret. Choose when the application should expire, and select `Add`.
+7. From the `Client secrets` section, copy the string in the `Value` column of the newly created application secret.
+
+- **Tenant ID**: To get the tenant ID, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the `Overview` section, copy the `Directory (tenant) ID`.
+
+- **Account Name**: To find the account name of an Azure Data Lake Storage account, follow these steps:
+
+1. Sign in to the Azure portal and navigate to the `Storage accounts` page.
+2. Find the Data Lake Storage account you want to access and click on its name.
+3. On the account overview page, locate the `Account name` field. This is the unique identifier for the Data Lake Storage account.
+4. You can use this account name to access and manage the resources associated with the account, such as creating and managing containers and directories.
+
+{% /extraContent %}
+
+{% partial file="/v1.5/connectors/test-connection.md" /%}
+
+{% partial file="/v1.5/connectors/pipeline/configure-ingestion.md" /%}
+
+{% partial file="/v1.5/connectors/ingestion-schedule-and-deploy.md" /%}
+
+{% /stepsContainer %}
+
+## Displaying Lineage Information
+
+Follow these steps to retrieve and display lineage information for a Data Factory service:
+
+1. Ingest Source and Sink Database Metadata: Identify the source and sink databases used by the Azure Data Factory service (for example, Redshift), and ingest metadata for these databases.
+2. Ingest Data Factory Service Metadata: Finally, ingest your Data Factory service.
+
+Once these steps are completed successfully, the lineage information for the service will be displayed.
+
+{% partial file="/v1.5/connectors/troubleshooting.md" /%}
+
+### Missing Lineage
+
+If lineage information is not displayed for a Data Factory service, follow these steps to diagnose the issue:
+
+1. *Permissions*: Ensure that the service principal or managed identity you’re using has the necessary permissions in the Data Factory resource (Reader, Contributor, or Data Factory Contributor role at minimum).
+2. *Metadata Ingestion*: Ensure that metadata for both the source and sink databases is ingested and passed to the lineage system. This typically involves configuring the relevant connectors to capture and transmit this information.
+3. *Run Successful*: Ensure that the pipeline run is successful; the sketch after this list shows one way to check.
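Editor's note: for the third point above, recent run status can be checked directly against Azure. A sketch using `azure-mgmt-datafactory`, with placeholder identifiers and a 7-day window mirroring the connector's default `run_filter_days`:

```python
from datetime import datetime, timedelta, timezone

from azure.identity import ClientSecretCredential
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import RunFilterParameters

credential = ClientSecretCredential(
    tenant_id="tenant_id", client_id="client_id", client_secret="client_secret"
)
client = DataFactoryManagementClient(credential, "subscription_id")

# Query runs from the last 7 days -- the same window the connector's
# default run_filter_days setting covers.
now = datetime.now(timezone.utc)
filters = RunFilterParameters(
    last_updated_after=now - timedelta(days=7),
    last_updated_before=now,
)
runs = client.pipeline_runs.query_by_factory(
    "resource_group_name", "factory_name", filters
)
for run in runs.value:
    print(run.pipeline_name, run.status)  # e.g. "Succeeded", "Failed"
```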
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/yaml.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/yaml.md
new file mode 100644
index 000000000000..6346288551d2
--- /dev/null
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/pipeline/datafactory/yaml.md
@@ -0,0 +1,205 @@
+---
+title: Run the Data Factory Connector Externally
+slug: /connectors/pipeline/datafactory/yaml
+collate: true
+---
+
+{% connectorDetailsHeader
+name="DataFactory"
+stage="PROD"
+platform="Collate"
+availableFeatures=["Pipelines", "Pipeline Status", "Lineage"]
+unavailableFeatures=["Owners", "Tags"]
+/ %}
+
+In this section, we provide guides and references to use the Azure Data Factory connector.
+
+Configure and schedule Azure Data Factory metadata workflows from the CLI:
+
+- [Requirements](#requirements)
+  - [Data Factory Versions](#data-factory-versions)
+- [Metadata Ingestion](#metadata-ingestion)
+
+{% partial file="/v1.5/connectors/external-ingestion-deployment.md" /%}
+
+## Requirements
+
+### Data Factory Versions
+
+The Ingestion framework uses the [Azure Data Factory APIs](https://learn.microsoft.com/en-us/rest/api/datafactory/v2) to connect to Data Factory and fetch metadata.
+
+You can find further information on the Azure Data Factory connector in the [docs](https://docs.open-metadata.org/connectors/pipeline/datafactory).
+
+## Permissions
+
+Ensure that the service principal or managed identity you’re using has the necessary permissions in the Data Factory resource (Reader, Contributor, or Data Factory Contributor role at minimum).
+
+### Python Requirements
+
+{% partial file="/v1.5/connectors/python-requirements.md" /%}
+
+To run the Data Factory ingestion, you will need to install:
+
+```bash
+pip3 install "openmetadata-ingestion[datafactory]"
+```
+
+## Metadata Ingestion
+
+All connectors are defined as JSON Schemas.
+[Here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/pipeline/datafactoryConnection.json)
+you can find the structure to create a connection to Data Factory.
+
+In order to create and run a Metadata Ingestion workflow, we will follow
+the steps to create a YAML configuration able to connect to the source,
+process the Entities if needed, and reach the OpenMetadata server.
+
+The workflow is modeled around the following
+[JSON Schema](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/metadataIngestion/workflow.json).
+
+### 1. Define the YAML Config
+
+This is a sample config for Data Factory:
+
+{% codePreview %}
+
+{% codeInfoContainer %}
+
+#### Source Configuration - Service Connection
+
+{% codeInfo srNumber=1 %}
+
+**clientId**: To get the Client ID (also known as the application ID), follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the Overview section, copy the `Application (client) ID`.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=2 %}
+
+**clientSecret**: To get the client secret, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. Under `Manage`, select `Certificates & secrets`.
+5. Under `Client secrets`, select `New client secret`.
+6. In the `Add a client secret` pop-up window, provide a description for your application secret. Choose when the application should expire, and select `Add`.
+7. From the `Client secrets` section, copy the string in the `Value` column of the newly created application secret.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=3 %}
+
+**tenantId**: To get the tenant ID, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the `Overview` section, copy the `Directory (tenant) ID`.
+
+{% /codeInfo %}
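Editor's note: a quick token request is enough to confirm that the three values above form a working service principal before running the ingestion. A sketch with `azure-identity`; the values are placeholders:

```python
from azure.identity import ClientSecretCredential

credential = ClientSecretCredential(
    tenant_id="tenant_id",
    client_id="client_id",
    client_secret="client_secret",
)

# Requesting a token for the Azure Resource Manager scope fails fast if
# any of tenantId, clientId, or clientSecret is wrong or expired.
token = credential.get_token("https://management.azure.com/.default")
print("Token acquired; expires at epoch", token.expires_on)
```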
+
+{% codeInfo srNumber=4 %}
+
+**accountName**: To find the account name of an Azure Data Lake Storage account, follow these steps:
+
+1. Sign in to the Azure portal and navigate to the `Storage accounts` page.
+2. Find the Data Lake Storage account you want to access and click on its name.
+3. On the account overview page, locate the `Account name` field. This is the unique identifier for the Data Lake Storage account.
+4. You can use this account name to access and manage the resources associated with the account, such as creating and managing containers and directories.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=5 %}
+
+**subscription_id**: Your Azure subscription’s unique identifier. In the Azure portal, navigate to Subscriptions > Your Subscription > Overview. You’ll see the subscription ID listed there.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=6 %}
+
+**resource_group_name**: The name of the resource group that contains your Data Factory instance. In the Azure portal, navigate to Resource Groups, find your resource group, and note the name.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=7 %}
+
+**factory_name**: The name of your Data Factory instance. In the Azure portal, navigate to Data Factories and find your Data Factory. The Data Factory name will be listed there.
+
+{% /codeInfo %}
+
+{% codeInfo srNumber=8 %}
+
+**run_filter_days**: The number of days back from the current date to look for pipeline runs; runs outside this window are filtered out. Default is `7` days. `Optional`
+
+{% /codeInfo %}
+
+{% partial file="/v1.5/connectors/yaml/pipeline/source-config-def.md" /%}
+
+{% partial file="/v1.5/connectors/yaml/ingestion-sink-def.md" /%}
+
+{% partial file="/v1.5/connectors/yaml/workflow-config-def.md" /%}
+
+{% /codeInfoContainer %}
+
+{% codeBlock fileName="filename.yaml" %}
+
+```yaml {% isCodeBlock=true %}
+source:
+  type: datafactory
+  serviceName: datafactory_source
+  serviceConnection:
+    config:
+      type: DataFactory
+      configSource:
+```
+```yaml {% srNumber=1 %}
+        clientId: client_id
+```
+```yaml {% srNumber=2 %}
+        clientSecret: client_secret
+```
+```yaml {% srNumber=3 %}
+        tenantId: tenant_id
+```
+```yaml {% srNumber=4 %}
+        accountName: account_name
+```
+```yaml {% srNumber=5 %}
+      subscription_id: subscription_id
+```
+```yaml {% srNumber=6 %}
+      resource_group_name: resource_group_name
+```
+```yaml {% srNumber=7 %}
+      factory_name: factory_name
+```
+```yaml {% srNumber=8 %}
+      run_filter_days: 7
+```
+
+{% partial file="/v1.5/connectors/yaml/pipeline/source-config.md" /%}
+
+{% partial file="/v1.5/connectors/yaml/ingestion-sink.md" /%}
+
+{% partial file="/v1.5/connectors/yaml/workflow-config.md" /%}
+
+{% /codeBlock %}
+
+{% /codePreview %}
+
+{% partial file="/v1.5/connectors/yaml/ingestion-cli.md" /%}
diff --git a/openmetadata-docs/images/v1.6/connectors/datafactory/add-new-service.png b/openmetadata-docs/images/v1.6/connectors/datafactory/add-new-service.png
new file mode 100644
index 000000000000..cd7a0f8f9747
Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/datafactory/add-new-service.png differ
diff --git a/openmetadata-docs/images/v1.6/connectors/datafactory/select-service.png b/openmetadata-docs/images/v1.6/connectors/datafactory/select-service.png
new file mode 100644
index 000000000000..77397746588f
Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/datafactory/select-service.png differ
diff --git a/openmetadata-docs/images/v1.6/connectors/datafactory/service-connection.png b/openmetadata-docs/images/v1.6/connectors/datafactory/service-connection.png
new file mode 100644
index 000000000000..3af6c1d7d8a1
Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/datafactory/service-connection.png differ
diff --git a/openmetadata-service/src/main/resources/json/data/testConnections/pipeline/datafactory.json b/openmetadata-service/src/main/resources/json/data/testConnections/pipeline/datafactory.json
new file mode 100644
index 000000000000..d4fb5529e45b
--- /dev/null
+++ b/openmetadata-service/src/main/resources/json/data/testConnections/pipeline/datafactory.json
@@ -0,0 +1,21 @@
+{
+  "name": "DataFactory",
+  "displayName": "Azure DataFactory Test Connection",
+  "description": "This Test Connection validates access against the server and basic metadata extraction of pipelines.",
+  "steps": [
+    {
+      "name": "GetPipelines",
+      "description": "Validate that the API can fetch pipelines.",
+      "errorMessage": "Failed to fetch pipelines. Please validate the credentials and confirm that the user has access to fetch pipelines.",
+      "shortCircuit": true,
+      "mandatory": true
+    },
+    {
+      "name": "GetRuns",
+      "description": "Validate that the API can fetch pipeline runs.",
+      "errorMessage": "Failed to fetch pipeline runs. Please validate the credentials, confirm that the user has access to fetch pipeline runs, and verify that the pipeline has run.",
+      "shortCircuit": true,
+      "mandatory": true
+    }
+  ]
+}
\ No newline at end of file
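Editor's note: the two test steps above correspond to plain Azure Resource Manager calls, so a failing test connection can be reproduced outside OpenMetadata. A sketch with `requests` — the routes and `api-version` come from the public Data Factory REST API, while every identifier value is a placeholder:

```python
import requests
from azure.identity import ClientSecretCredential

credential = ClientSecretCredential("tenant_id", "client_id", "client_secret")
token = credential.get_token("https://management.azure.com/.default").token
headers = {"Authorization": f"Bearer {token}"}

base = (
    "https://management.azure.com/subscriptions/subscription_id"
    "/resourceGroups/resource_group_name"
    "/providers/Microsoft.DataFactory/factories/factory_name"
)

# GetPipelines: list the pipelines in the factory.
resp = requests.get(f"{base}/pipelines?api-version=2018-06-01", headers=headers)
resp.raise_for_status()

# GetRuns: query pipeline runs within a time window (POST body required).
resp = requests.post(
    f"{base}/queryPipelineRuns?api-version=2018-06-01",
    headers=headers,
    json={
        "lastUpdatedAfter": "2024-01-01T00:00:00Z",
        "lastUpdatedBefore": "2024-01-08T00:00:00Z",
    },
)
resp.raise_for_status()
```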
"additionalProperties": false, + "required": ["subscription_id", "resource_group_name", "factory_name"] + } + \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json b/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json index 5f2fc56a1f4f..a2f4536998f2 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json @@ -33,7 +33,8 @@ "KafkaConnect", "DBTCloud", "Matillion", - "Stitch" + "Stitch", + "DataFactory" ], "javaEnums": [ { @@ -84,6 +85,9 @@ { "name": "Matillion" }, + { + "name": "DataFactory" + }, { "name": "Stitch" } @@ -148,6 +152,9 @@ { "$ref": "./connections/pipeline/matillionConnection.json" }, + { + "$ref": "./connections/pipeline/datafactoryConnection.json" + }, { "$ref": "./connections/pipeline/stitchConnection.json" } diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md new file mode 100644 index 000000000000..16959622c18a --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md @@ -0,0 +1,87 @@ +# Data Factory +In this section, we provide guides and references to use the DataFactory connector. + +## Requirements + +The Ingestion framework uses [Azure Data Factory APIs](https://learn.microsoft.com/en-us/rest/api/datafactory/v2) to connect to the Data Factory and fetch metadata. + +You can find further information on the Azure Data Factory connector in the [docs](https://docs.open-metadata.org/connectors/pipeline/datafactory). + + +## Permissions + +Ensure that the service principal or managed identity you’re using has the necessary permissions in the Data Factory resource. (Reader, Contributor or Data Factory Contributor role at minimum). + + +## Connection Details +$$section +### Subscription ID $(id="subscription_id") +Your Azure subscription’s unique identifier. In the Azure portal, navigate to Subscriptions > Your Subscription > Overview. You’ll see the subscription ID listed there. +$$ + +$$section +### Resource Group name $(id="resource_group_name") +This is the name of the resource group that contains your Data Factory instance. In the Azure portal, navigate to Resource Groups. Find your resource group, and note the name. +$$ + +$$section +### Azure Data Factory name $(id="factory_name") +The name of your Data Factory instance. In the Azure portal, navigate to Data Factories and find your Data Factory. The Data Factory name will be listed there. +$$ + +$$section +### Azure Data Factory pipeline runs day filter $(id="run_filter_days") +The days range when filtering pipeline runs. It specifies how many days back from the current date to look for pipeline runs, and filter runs within the given period of days. Default is `7` days. `Optional` +$$ + +$$section +### Azure Data Factory Configuration $(id="configSource") +$$ + +$$section +### Client ID $(id="clientId") +To get the Client ID (also known as application ID), follow these steps: + +1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices). +2. Search for `App registrations` and select the `App registrations link`. +3. Select the `Azure AD` app you're using for this connection. +4. From the Overview section, copy the `Application (client) ID`. 
diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json b/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json
index 5f2fc56a1f4f..a2f4536998f2 100644
--- a/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json
+++ b/openmetadata-spec/src/main/resources/json/schema/entity/services/pipelineService.json
@@ -33,7 +33,8 @@
       "KafkaConnect",
       "DBTCloud",
       "Matillion",
-      "Stitch"
+      "Stitch",
+      "DataFactory"
     ],
     "javaEnums": [
       {
@@ -84,6 +85,9 @@
       {
         "name": "Matillion"
       },
+      {
+        "name": "DataFactory"
+      },
       {
         "name": "Stitch"
       }
@@ -148,6 +152,9 @@
       {
         "$ref": "./connections/pipeline/matillionConnection.json"
       },
+      {
+        "$ref": "./connections/pipeline/datafactoryConnection.json"
+      },
       {
         "$ref": "./connections/pipeline/stitchConnection.json"
       }
diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md
new file mode 100644
index 000000000000..16959622c18a
--- /dev/null
+++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/DataFactory.md
@@ -0,0 +1,87 @@
+# Data Factory
+In this section, we provide guides and references to use the DataFactory connector.
+
+## Requirements
+
+The Ingestion framework uses the [Azure Data Factory APIs](https://learn.microsoft.com/en-us/rest/api/datafactory/v2) to connect to Data Factory and fetch metadata.
+
+You can find further information on the Azure Data Factory connector in the [docs](https://docs.open-metadata.org/connectors/pipeline/datafactory).
+
+## Permissions
+
+Ensure that the service principal or managed identity you’re using has the necessary permissions in the Data Factory resource (Reader, Contributor, or Data Factory Contributor role at minimum).
+
+## Connection Details
+
+$$section
+### Subscription ID $(id="subscription_id")
+Your Azure subscription’s unique identifier. In the Azure portal, navigate to Subscriptions > Your Subscription > Overview. You’ll see the subscription ID listed there.
+$$
+
+$$section
+### Resource Group name $(id="resource_group_name")
+The name of the resource group that contains your Data Factory instance. In the Azure portal, navigate to Resource Groups, find your resource group, and note the name.
+$$
+
+$$section
+### Azure Data Factory name $(id="factory_name")
+The name of your Data Factory instance. In the Azure portal, navigate to Data Factories and find your Data Factory. The Data Factory name will be listed there.
+$$
+
+$$section
+### Azure Data Factory pipeline runs day filter $(id="run_filter_days")
+The number of days back from the current date to look for pipeline runs; runs outside this window are filtered out. Default is `7` days. `Optional`
+$$
+
+$$section
+### Azure Data Factory Configuration $(id="configSource")
+$$
+
+$$section
+### Client ID $(id="clientId")
+To get the Client ID (also known as the application ID), follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the Overview section, copy the `Application (client) ID`.
+$$
+
+$$section
+### Client Secret $(id="clientSecret")
+To get the client secret, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. Under `Manage`, select `Certificates & secrets`.
+5. Under `Client secrets`, select `New client secret`.
+6. In the `Add a client secret` pop-up window, provide a description for your application secret. Choose when the application should expire, and select `Add`.
+7. From the `Client secrets` section, copy the string in the `Value` column of the newly created application secret.
+$$
+
+$$section
+### Tenant ID $(id="tenantId")
+To get the tenant ID, follow these steps:
+
+1. Log into [Microsoft Azure](https://ms.portal.azure.com/#allservices).
+2. Search for `App registrations` and select the `App registrations` link.
+3. Select the `Azure AD` app you're using for this connection.
+4. From the `Overview` section, copy the `Directory (tenant) ID`.
+$$
+
+$$section
+### Account Name $(id="accountName")
+To find the account name of an Azure Data Lake Storage account, follow these steps:
+
+1. Sign in to the Azure portal and navigate to the `Storage accounts` page.
+2. Find the Data Lake Storage account you want to access and click on its name.
+3. On the account overview page, locate the `Account name` field. This is the unique identifier for the Data Lake Storage account.
+4. You can use this account name to access and manage the resources associated with the account, such as creating and managing containers and directories.
+$$
diff --git a/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts b/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts
index adcab3a38e08..690cae8dd6b5 100644
--- a/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts
+++ b/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts
@@ -145,6 +145,7 @@ class ServiceUtilClassBase {
     APIServiceType.Webhook,
     MlModelServiceType.VertexAI,
     PipelineServiceType.Matillion,
+    PipelineServiceType.DataFactory,
   ];

   DatabaseServiceTypeSmallCase = this.convertEnumToLowerCase<