diff --git a/.github/workflows/py-cli-e2e-tests.yml b/.github/workflows/py-cli-e2e-tests.yml index af0e8a79953e..20572da9a070 100644 --- a/.github/workflows/py-cli-e2e-tests.yml +++ b/.github/workflows/py-cli-e2e-tests.yml @@ -20,12 +20,12 @@ on: required: True default: '["bigquery", "dbt_redshift", "metabase", "mssql", "mysql", "redash", "snowflake", "tableau", "powerbi", "vertica", "python", "redshift", "quicksight", "datalake_s3", "postgres", "oracle", "athena", "bigquery_multiple_project"]' debug: - description: "If Debugging the Pipeline, Slack and Sonar events won't be triggered [default, true or false]" + description: "If Debugging the Pipeline, Slack and Sonar events won't be triggered [default, true or false]. Default will trigger only on main branch." required: False default: "default" env: - DEBUG: ${{ inputs.debug == 'true' || (inputs.debug == 'default' && github.ref != 'refs/heads/main') }}' }} + DEBUG: "${{ (inputs.debug == 'default' && github.ref == 'refs/heads/main' && 'false') || (inputs.debug == 'default' && github.ref != 'refs/heads/main' && 'true') || inputs.debug || 'false' }}" permissions: id-token: write @@ -38,7 +38,7 @@ jobs: outputs: DEBUG: ${{ env.DEBUG }} steps: - - run: echo "null" + - run: echo "INPUTS_DEBUG=${{ inputs.debug }}, GITHUB_REF=${{ github.ref }}, DEBUG=$DEBUG" py-cli-e2e-tests: runs-on: ubuntu-latest @@ -189,16 +189,13 @@ jobs: docker compose down --remove-orphans sudo rm -rf ${PWD}/docker-volume - - uses: austenstone/job-id@v1 - id: job-id - - name: Slack on Failure if: steps.e2e-test.outcome != 'success' && steps.python-e2e-test.outcome != 'success' && env.DEBUG == 'false' uses: slackapi/slack-github-action@v1.23.0 with: payload: | { - "text": "🔥 Failed E2E Test for: ${{ matrix.e2e-test }} 🔥\nLogs: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/job/${{ steps.job-id.outputs.job-id }}" + "text": "🔥 Failed E2E Test for: ${{ matrix.e2e-test }} 🔥\nLogs: ${{ github.server_url }}/${{ 
github.repository }}/actions/runs/${{ github.run_id }}" } env: SLACK_WEBHOOK_URL: ${{ secrets.E2E_SLACK_WEBHOOK }} diff --git a/.gitignore b/.gitignore index 83c2a683bd43..277a4b07b158 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ # Created by .ignore support plugin (hsz.mobi) # Maven +.venv __pycache__ target/ pom.xml.tag diff --git a/bootstrap/sql/migrations/native/1.6.0/mysql/postDataMigrationSQLScript.sql b/bootstrap/sql/migrations/native/1.6.0/mysql/postDataMigrationSQLScript.sql index 1322603c3321..0ac744e4c475 100644 --- a/bootstrap/sql/migrations/native/1.6.0/mysql/postDataMigrationSQLScript.sql +++ b/bootstrap/sql/migrations/native/1.6.0/mysql/postDataMigrationSQLScript.sql @@ -52,3 +52,6 @@ WHERE serviceType IN ('Athena','BigQuery','Mssql','Mysql','Oracle','Postgres','R update dbservice_entity set json = JSON_SET(json, '$.connection.config.supportsSystemProfile', true) where serviceType in ('Snowflake', 'Redshift', 'BigQuery'); + +-- Update all rows in the consumers_dlq table to set the source column to 'publisher' +UPDATE consumers_dlq SET source = 'publisher'; \ No newline at end of file diff --git a/bootstrap/sql/migrations/native/1.6.0/mysql/schemaChanges.sql b/bootstrap/sql/migrations/native/1.6.0/mysql/schemaChanges.sql index f13cbe433702..7f90aff02aef 100644 --- a/bootstrap/sql/migrations/native/1.6.0/mysql/schemaChanges.sql +++ b/bootstrap/sql/migrations/native/1.6.0/mysql/schemaChanges.sql @@ -6,4 +6,10 @@ CREATE TABLE IF NOT EXISTS apps_data_store ( identifier VARCHAR(256) NOT NULL, type VARCHAR(256) NOT NULL, json JSON NOT NULL -); \ No newline at end of file +); + +-- Add the source column to the consumers_dlq table +ALTER TABLE consumers_dlq ADD COLUMN source VARCHAR(255); + +-- Create an index on the source column in the consumers_dlq table +CREATE INDEX idx_consumers_dlq_source ON consumers_dlq (source); \ No newline at end of file diff --git a/bootstrap/sql/migrations/native/1.6.0/postgres/postDataMigrationSQLScript.sql 
b/bootstrap/sql/migrations/native/1.6.0/postgres/postDataMigrationSQLScript.sql index b45dd48f5c50..141a4e6ce918 100644 --- a/bootstrap/sql/migrations/native/1.6.0/postgres/postDataMigrationSQLScript.sql +++ b/bootstrap/sql/migrations/native/1.6.0/postgres/postDataMigrationSQLScript.sql @@ -68,3 +68,6 @@ WHERE serviceType IN ('Athena','BigQuery','Mssql','Mysql','Oracle','Postgres','R UPDATE dbservice_entity SET json = jsonb_set(json::jsonb, '{connection,config,supportsSystemProfile}', 'true'::jsonb) WHERE serviceType IN ('Snowflake', 'Redshift', 'BigQuery'); + +-- Update all rows in the consumers_dlq table to set the source column to 'publisher' +UPDATE consumers_dlq SET source = 'publisher'; \ No newline at end of file diff --git a/bootstrap/sql/migrations/native/1.6.0/postgres/schemaChanges.sql b/bootstrap/sql/migrations/native/1.6.0/postgres/schemaChanges.sql index f13cbe433702..7f90aff02aef 100644 --- a/bootstrap/sql/migrations/native/1.6.0/postgres/schemaChanges.sql +++ b/bootstrap/sql/migrations/native/1.6.0/postgres/schemaChanges.sql @@ -6,4 +6,10 @@ CREATE TABLE IF NOT EXISTS apps_data_store ( identifier VARCHAR(256) NOT NULL, type VARCHAR(256) NOT NULL, json JSON NOT NULL -); \ No newline at end of file +); + +-- Add the source column to the consumers_dlq table +ALTER TABLE consumers_dlq ADD COLUMN source VARCHAR(255); + +-- Create an index on the source column in the consumers_dlq table +CREATE INDEX idx_consumers_dlq_source ON consumers_dlq (source); \ No newline at end of file diff --git a/ingestion/setup.py b/ingestion/setup.py index 65b9fd521cc2..2903be311986 100644 --- a/ingestion/setup.py +++ b/ingestion/setup.py @@ -224,6 +224,7 @@ "elasticsearch": { VERSIONS["elasticsearch8"], }, # also requires requests-aws4auth which is in base + "exasol": {"sqlalchemy_exasol>=5,<6"}, "glue": {VERSIONS["boto3"]}, "great-expectations": {VERSIONS["great-expectations"]}, "greenplum": {*COMMONS["postgres"]}, diff --git 
a/ingestion/src/metadata/data_quality/validations/column/base/columnValuesToBeInSet.py b/ingestion/src/metadata/data_quality/validations/column/base/columnValuesToBeInSet.py index a587bb986983..cd5029304f53 100644 --- a/ingestion/src/metadata/data_quality/validations/column/base/columnValuesToBeInSet.py +++ b/ingestion/src/metadata/data_quality/validations/column/base/columnValuesToBeInSet.py @@ -45,6 +45,7 @@ def run_validation(self) -> TestCaseResult: Returns: TestCaseResult: """ + matched = False allowed_values = self.get_test_case_param_value( self.test_case.parameterValues, # type: ignore "allowedValues", @@ -58,11 +59,12 @@ def run_validation(self) -> TestCaseResult: try: column: Union[SQALikeColumn, Column] = self._get_column_name() res = self._run_results(Metrics.COUNT_IN_SET, column, values=allowed_values) + matched = res > 0 if match_enum: count = self._run_results( Metrics.ROW_COUNT, column, values=allowed_values ) - res = count - res + matched = count - res == 0 except (ValueError, RuntimeError) as exc: msg = f"Error computing {self.test_case.fullyQualifiedName}: {exc}" # type: ignore logger.debug(traceback.format_exc()) @@ -81,7 +83,7 @@ def run_validation(self) -> TestCaseResult: return self.get_test_case_result_object( self.execution_date, - self.get_test_case_status(res == 0 if match_enum else res >= 1), + self.get_test_case_status(matched), f"Found countInSet={res}.", [TestResultValue(name=ALLOWED_VALUE_COUNT, value=str(res))], row_count=row_count, diff --git a/ingestion/src/metadata/ingestion/source/database/exasol/__init__.py b/ingestion/src/metadata/ingestion/source/database/exasol/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/ingestion/src/metadata/ingestion/source/database/exasol/connection.py b/ingestion/src/metadata/ingestion/source/database/exasol/connection.py new file mode 100644 index 000000000000..abdcc515b281 --- /dev/null +++ b/ingestion/src/metadata/ingestion/source/database/exasol/connection.py @@ 
-0,0 +1,87 @@ +from typing import Optional +from urllib.parse import quote_plus + +from pydantic import SecretStr +from sqlalchemy.engine import Engine + +from metadata.generated.schema.entity.automations.workflow import ( + Workflow as AutomationWorkflow, +) +from metadata.generated.schema.entity.services.connections.database.exasolConnection import ( + ExasolConnection, +) +from metadata.ingestion.connections.builders import ( + create_generic_db_connection, + get_connection_args_common, +) +from metadata.ingestion.connections.test_connections import test_query +from metadata.ingestion.ometa.ometa_api import OpenMetadata +from metadata.utils.logger import ingestion_logger + +logger = ingestion_logger() + + +def get_connection_url(connection: ExasolConnection) -> str: + """ + Common method for building the source connection urls + """ + + url = f"{connection.scheme.value}://" + + if connection.username: + url += f"{quote_plus(connection.username)}" + connection.password = ( + SecretStr("") if not connection.password else connection.password + ) + url += ( + f":{quote_plus(connection.password.get_secret_value())}" + if connection + else "" + ) + url += "@" + + url += connection.hostPort + + if hasattr(connection, "databaseSchema"): + url += f"/{connection.databaseSchema}" if connection.databaseSchema else "" + + tls_settings = { + "validate-certificate": {}, + "ignore-certificate": {"SSLCertificate": "SSL_VERIFY_NONE"}, + "disable-tls": {"SSLCertificate": "SSL_VERIFY_NONE", "ENCRYPTION": "no"}, + } + options = tls_settings[connection.tls.value] + if options: + if (hasattr(connection, "database") and not connection.database) or ( + hasattr(connection, "databaseSchema") and not connection.databaseSchema + ): + url += "/" + params = "&".join( + f"{key}={quote_plus(value)}" for (key, value) in options.items() if value + ) + url = f"{url}?{params}" + return url + + +def get_connection(connection: ExasolConnection) -> Engine: + """ + Create connection + """ + return 
create_generic_db_connection( + connection=connection, + get_connection_url_fn=get_connection_url, + get_connection_args_fn=get_connection_args_common, + ) + + +def test_connection( + metadata: OpenMetadata, + engine: Engine, + service_connection: ExasolConnection, + automation_workflow: Optional[AutomationWorkflow] = None, +) -> None: + """ + Test connection. This can be executed either as part + of a metadata workflow or during an Automation Workflow + """ + test_query(engine, "SELECT 1;") diff --git a/ingestion/src/metadata/ingestion/source/database/exasol/metadata.py b/ingestion/src/metadata/ingestion/source/database/exasol/metadata.py new file mode 100644 index 000000000000..d1b7b71eb358 --- /dev/null +++ b/ingestion/src/metadata/ingestion/source/database/exasol/metadata.py @@ -0,0 +1,27 @@ +from typing import Optional, cast + +from metadata.generated.schema.entity.services.connections.database.exasolConnection import ( + ExasolConnection, +) +from metadata.generated.schema.metadataIngestion.workflow import ( + Source as WorkflowSource, +) +from metadata.ingestion.api.steps import InvalidSourceException +from metadata.ingestion.ometa.ometa_api import OpenMetadata +from metadata.ingestion.source.database.common_db_source import CommonDbSourceService + + +class ExasolSource(CommonDbSourceService): + @classmethod + def create( + cls, config_dict, metadata: OpenMetadata, pipeline_name: Optional[str] = None + ): + config: WorkflowSource = WorkflowSource.model_validate(config_dict) + if config.serviceConnection is None: + raise InvalidSourceException("Missing service connection") + connection = cast(ExasolConnection, config.serviceConnection.root.config) + if not isinstance(connection, ExasolConnection): + raise InvalidSourceException( + f"Expected ExasolConnection, but got {connection}" + ) + return cls(config, metadata) diff --git a/ingestion/src/metadata/ingestion/source/database/exasol/service_spec.py 
b/ingestion/src/metadata/ingestion/source/database/exasol/service_spec.py new file mode 100644 index 000000000000..802439326e07 --- /dev/null +++ b/ingestion/src/metadata/ingestion/source/database/exasol/service_spec.py @@ -0,0 +1,4 @@ +from metadata.ingestion.source.database.exasol.metadata import ExasolSource +from metadata.utils.service_spec.default import DefaultDatabaseSpec + +ServiceSpec = DefaultDatabaseSpec(metadata_source_class=ExasolSource) diff --git a/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/profiler.py b/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/profiler.py index e028bb2a229c..f68f18f85c15 100644 --- a/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/profiler.py +++ b/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/profiler.py @@ -13,15 +13,14 @@ Profiler for Snowflake """ from metadata.ingestion.source.database.snowflake.profiler.system import ( - SnowflakeSystemMetricsSource, + SnowflakeSystemMetricsComputer, ) from metadata.profiler.interface.sqlalchemy.snowflake.profiler_interface import ( SnowflakeProfilerInterface, ) +from metadata.profiler.metrics.system.system import SystemMetricsComputer class SnowflakeProfiler(SnowflakeProfilerInterface): - def initialize_system_metrics_computer( - self, **kwargs - ) -> SnowflakeSystemMetricsSource: - return SnowflakeSystemMetricsSource(session=self.session) + def initialize_system_metrics_computer(self, **kwargs) -> SystemMetricsComputer: + return SnowflakeSystemMetricsComputer(session=self.session) diff --git a/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/system.py b/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/system.py index fa92017db4db..c986ac0beed1 100644 --- a/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/system.py +++ b/ingestion/src/metadata/ingestion/source/database/snowflake/profiler/system.py @@ -17,7 +17,9 @@ CacheProvider, 
EmptySystemMetricsSource, SQASessionProvider, + SystemMetricsComputer, ) +from metadata.utils.collections import CaseInsensitiveString from metadata.utils.logger import profiler_logger from metadata.utils.lru_cache import LRU_CACHE_SIZE, LRUCache from metadata.utils.profiler_utils import get_identifiers_from_string @@ -222,7 +224,7 @@ def get_snowflake_system_queries( """ try: - logger.debug(f"Trying to parse query [{query_log_entry.query_id}]") + logger.debug(f"Parsing snowflake query [{query_log_entry.query_id}]") identifier = _parse_query(query_log_entry.query_text) if not identifier: raise RuntimeError("Could not identify the table from the query.") @@ -358,9 +360,17 @@ def get_system_profile( } for q in query_results if getattr(q, rows_affected_field) > 0 - and q.database_name == db - and q.schema_name == schema - and q.table_name == table + # snowflake SQL identifiers are case insensitive. All identifiers are stored in upper case. + and ( + CaseInsensitiveString(db), + CaseInsensitiveString(schema), + CaseInsensitiveString(table), + ) + == ( + q.database_name, + q.schema_name, + q.table_name, + ) ] ) @@ -387,3 +397,9 @@ def get_queries(self, table: str) -> List[SnowflakeQueryResult]: for row in queries ] return [result for result in results if result is not None] + + +class SnowflakeSystemMetricsComputer( + SystemMetricsComputer, SnowflakeSystemMetricsSource +): + pass diff --git a/ingestion/tests/cli_e2e/base/test_cli_db.py b/ingestion/tests/cli_e2e/base/test_cli_db.py index ec6aa8f07b40..43bd7247cf4c 100644 --- a/ingestion/tests/cli_e2e/base/test_cli_db.py +++ b/ingestion/tests/cli_e2e/base/test_cli_db.py @@ -421,8 +421,6 @@ def assert_status_for_data_quality(self, source_status, sink_status): def system_profile_assertions(self): cases = self.get_system_profile_cases() - if not cases: - return for table_fqn, expected_profile in cases: actual_profiles = self.openmetadata.get_profile_data( table_fqn, @@ -431,10 +429,13 @@ def system_profile_assertions(self): 
profile_type=SystemProfile, ).entities actual_profiles = sorted( - actual_profiles, key=lambda x: x.timestamp.root + actual_profiles, key=lambda x: (x.timestamp.root, x.operation.value) + ) + expected_profile = sorted( + expected_profile, + key=lambda x: (x.timestamp.root, x.operation.value), ) - actual_profiles = actual_profiles[-len(expected_profile) :] - assert len(expected_profile) == len(actual_profiles) + assert len(actual_profiles) >= len(expected_profile) for expected, actual in zip(expected_profile, actual_profiles): try: assert_equal_pydantic_objects( diff --git a/ingestion/tests/cli_e2e/test_cli_bigquery.py b/ingestion/tests/cli_e2e/test_cli_bigquery.py index 77d333902982..8ce1bec2ae9f 100644 --- a/ingestion/tests/cli_e2e/test_cli_bigquery.py +++ b/ingestion/tests/cli_e2e/test_cli_bigquery.py @@ -12,7 +12,7 @@ """ Test Bigquery connector with CLI """ -from typing import List +from typing import List, Tuple from metadata.generated.schema.entity.data.table import DmlOperationType, SystemProfile from metadata.generated.schema.type.basic import Timestamp diff --git a/ingestion/tests/unit/test_source_connection.py b/ingestion/tests/unit/test_source_connection.py index e2bb811e67a1..d1984048980d 100644 --- a/ingestion/tests/unit/test_source_connection.py +++ b/ingestion/tests/unit/test_source_connection.py @@ -37,6 +37,12 @@ DruidConnection, DruidScheme, ) +from metadata.generated.schema.entity.services.connections.database.exasolConnection import ( + ExasolConnection, + ExasolScheme, + ExasolType, + Tls, +) from metadata.generated.schema.entity.services.connections.database.hiveConnection import ( HiveConnection, HiveScheme, @@ -1178,3 +1184,61 @@ def test_oracle_url(self): ), ) assert get_connection_url(oracle_conn_obj) == expected_url + + def test_exasol_url(self): + from metadata.ingestion.source.database.exasol.connection import ( + get_connection_url, + ) + + def generate_test_data( + username="admin", password="password", port=8563, hostname="localhost" 
+ ): + from collections import namedtuple + + TestData = namedtuple("TestData", ["comment", "kwargs", "expected"]) + host_port = f"{hostname}:{port}" + + yield from ( + TestData( + comment="Testing default parameters", + kwargs={ + "username": username, + "password": password, + "hostPort": host_port, + "tls": Tls.validate_certificate, + }, + expected="exa+websocket://admin:password@localhost:8563", + ), + TestData( + comment="Testing the manual setting of parameters", + kwargs={ + "type": ExasolType.Exasol, + "scheme": ExasolScheme.exa_websocket, + "username": username, + "password": password, + "hostPort": host_port, + "tls": Tls.ignore_certificate, + }, + expected="exa+websocket://admin:password@localhost:8563?SSLCertificate=SSL_VERIFY_NONE", + ), + TestData( + comment="Testing disabling TLS completely", + kwargs={ + "type": ExasolType.Exasol, + "scheme": ExasolScheme.exa_websocket, + "username": username, + "password": password, + "hostPort": host_port, + "tls": Tls.disable_tls, + }, + expected="exa+websocket://admin:password@localhost:8563?SSLCertificate=SSL_VERIFY_NONE&ENCRYPTION=no", + ), + ) + + # execute test cases + for data in generate_test_data(): + with self.subTest(kwargs=data.kwargs, expected=data.expected): + connection = ExasolConnection(**data.kwargs) + actual = get_connection_url(connection) + expected = data.expected + assert actual == expected diff --git a/openmetadata-clients/openmetadata-java-client/pom.xml b/openmetadata-clients/openmetadata-java-client/pom.xml index f7a3dd56ab72..c60dfde3b786 100644 --- a/openmetadata-clients/openmetadata-java-client/pom.xml +++ b/openmetadata-clients/openmetadata-java-client/pom.xml @@ -20,6 +20,8 @@ 8.3.3 2.1.23 1.3.5 + 3.6.0 + 3.3.1 @@ -200,6 +202,7 @@ org.apache.maven.plugins maven-source-plugin + ${maven-source-plugin.version} org.codehaus.mojo diff --git a/openmetadata-docs/content/partials/v1.5/releases/latest.md b/openmetadata-docs/content/partials/v1.5/releases/latest.md index 
141046bf0c59..f4613bb5347f 100644 --- a/openmetadata-docs/content/partials/v1.5/releases/latest.md +++ b/openmetadata-docs/content/partials/v1.5/releases/latest.md @@ -1,7 +1,7 @@ -# 1.5.8 Release 🎉 +# 1.5.10 Release 🎉 {% note noteType="Tip" %} -**Oct 24th, 2024** +**Oct 31st, 2024** {% /note %} {% inlineCalloutContainer %} @@ -10,27 +10,17 @@ color="violet-70" icon="celebration" bold="Upgrade OpenMetadata" href="/deployment/upgrade" %} -Learn how to upgrade your OpenMetadata instance to 1.5.8! +Learn how to upgrade your OpenMetadata instance to 1.5.10! {% /inlineCallout %} {% /inlineCalloutContainer %} -You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.8-release). +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.10-release). # What's Changed -- Minor: Add location path to a table entity. -- Minor: Do not include soft deleted assets in the Data Insight. -- Minor: Supported total unique user count on the Team page. -- Fix: Add Azure Token Base Authentication -- Fix: Hive Meta store connection issue. -- Fix: Issues in zh language search index mapping. -- Fix: Live index is on test suite creation. -- Fix: LocationPath Index. -- Fix: Mode dashboard ingestion API call. -- Fix: Mode test connection returns data in dict instead of JSON. -- Fix: Quicksight lineage source. -- Fix: Task deserialization in Airflow metadata ingestion. -- Fix: Web analytic activity being reset. +- Fix encoding issue for teams search query. +- Fix disable empty rule for query builder widget. +- Fix unable to add more enum values for enum cp. +- Fix navigate to listing for deleting a service. 
- -**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.7-release...1.5.8-release \ No newline at end of file +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.9-release...1.5.10-release diff --git a/openmetadata-docs/content/partials/v1.6/releases/latest.md b/openmetadata-docs/content/partials/v1.6/releases/latest.md index 141046bf0c59..f4613bb5347f 100644 --- a/openmetadata-docs/content/partials/v1.6/releases/latest.md +++ b/openmetadata-docs/content/partials/v1.6/releases/latest.md @@ -1,7 +1,7 @@ -# 1.5.8 Release 🎉 +# 1.5.10 Release 🎉 {% note noteType="Tip" %} -**Oct 24th, 2024** +**Oct 31st, 2024** {% /note %} {% inlineCalloutContainer %} @@ -10,27 +10,17 @@ color="violet-70" icon="celebration" bold="Upgrade OpenMetadata" href="/deployment/upgrade" %} -Learn how to upgrade your OpenMetadata instance to 1.5.8! +Learn how to upgrade your OpenMetadata instance to 1.5.10! {% /inlineCallout %} {% /inlineCalloutContainer %} -You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.8-release). +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.10-release). # What's Changed -- Minor: Add location path to a table entity. -- Minor: Do not include soft deleted assets in the Data Insight. -- Minor: Supported total unique user count on the Team page. -- Fix: Add Azure Token Base Authentication -- Fix: Hive Meta store connection issue. -- Fix: Issues in zh language search index mapping. -- Fix: Live index is on test suite creation. -- Fix: LocationPath Index. -- Fix: Mode dashboard ingestion API call. -- Fix: Mode test connection returns data in dict instead of JSON. -- Fix: Quicksight lineage source. -- Fix: Task deserialization in Airflow metadata ingestion. -- Fix: Web analytic activity being reset. +- Fix encoding issue for teams search query. +- Fix disable empty rule for query builder widget. 
+- Fix unable to add more enum values for enum cp. +- Fix navigate to listing for deleting a service. - -**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.7-release...1.5.8-release \ No newline at end of file +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.9-release...1.5.10-release diff --git a/openmetadata-docs/content/v1.5.x/collate-menu.md b/openmetadata-docs/content/v1.5.x/collate-menu.md index 32e06297b0e2..1d86fd398a27 100644 --- a/openmetadata-docs/content/v1.5.x/collate-menu.md +++ b/openmetadata-docs/content/v1.5.x/collate-menu.md @@ -172,6 +172,12 @@ site_menu: url: /connectors/database/sap-hana - category: Connectors / Database / SAP Hana / Run Externally url: /connectors/database/sap-hana/yaml + - category: Connectors / Database / SAP ERP + url: /connectors/database/sap-erp + - category: Connectors / Database / SAP ERP / Run Externally + url: /connectors/database/sap-erp/yaml + - category: Connectors / Database / SAP ERP / Setup SAP ERP APIs + url: /connectors/database/sap-erp/setup-sap-apis - category: Connectors / Database / SAS url: /connectors/database/sas - category: Connectors / Database / SAS / Run Externally diff --git a/openmetadata-docs/content/v1.5.x/deployment/azure-passwordless-auth.md b/openmetadata-docs/content/v1.5.x/deployment/azure-passwordless-auth.md new file mode 100644 index 000000000000..a89745f17449 --- /dev/null +++ b/openmetadata-docs/content/v1.5.x/deployment/azure-passwordless-auth.md @@ -0,0 +1,98 @@ +--- +title: Azure - Enable Passwordless Database Backend Connection +slug: /deployment/azure-passwordless-auth +collate: false +--- + +# Azure - Enable Passwordless Database Backend Connection + +By Default, OpenMetadata supports basic authentication when connecting to MySQL/PostgreSQL as Database backend. With Azure, you can enhance the security for configuring Database configurations other the basic authentication mechanism. 
+This guide will help you setup the application to use passwordless approach for Azure PaaS Databases (preferrably [Azure Database for PostgreSQL - Flexible Server](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/service-overview) and [Azure Database for MySQL - Flexible Server](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/overview)). + +# Prerequisites + +This guide requires the following prerequisites - + +- Azure Database Flexible Server enabled with Microsoft Entra authentication +- [Azure Managed Identities](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) +- Azure Kubernetes Service (Enabled with Workload Identity) or Azure VM +- OpenMetadata Application Version `1.5.9` and higher + +If you are looking to enable Passwordless Database Backend Configuration on Existing OpenMetadata Application hosted using Azure Cloud, you need to create perform the following prerequisites - + +- Create Managed Identity from Azure Portal +- Create a SQL User for Managed Identity in Azure Databases + - PostgreSQL Reference link [here](https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/how-to-manage-azure-ad-users#create-a-userrole-using-microsoft-entra-principal-name) + - MySQL Reference link [here](https://learn.microsoft.com/en-us/azure/mysql/flexible-server/how-to-azure-ad#create-microsoft-entra-users-in-azure-database-for-mysql) +- Assign Existing OpenMetadata Database Tables Ownership to Managed Identities created in above step + +# Enabling Passwordless connections with OpenMetadata + +Configure your Helm Values for Kubernetes Deployment like below - + +```yaml +# For PostgreSQL +commonLabels: + azure.workload.identity/use: "true" +serviceAccount: + create: true + annotations: + azure.workload.identity/client-id: + name: "openmetadata-sa" +automountServiceAccountToken: true +openmetadata: + config: + database: + host: + driverClass: org.postgresql.Driver + dbParams: 
"azure=true&allowPublicKeyRetrieval=true&serverTimezone=UTC&sslmode=require&authenticationPluginClassName=com.azure.identity.extensions.jdbc.postgresql.AzurePostgresqlAuthenticationPlugin" + dbScheme: postgresql + port: 5432 + auth: + username: + password: + secretRef: database-secrets + secretKey: openmetadata-database-password + databaseName: + +# For MySQL +commonLabels: + azure.workload.identity/use: "true" +serviceAccount: + create: true + annotations: + azure.workload.identity/client-id: + name: "openmetadata-sa" +automountServiceAccountToken: true +openmetadata: + config: + database: + host: + driverClass: com.mysql.cj.jdbc.Driver + dbParams: "azure=true&allowPublicKeyRetrieval=trueserverTimezone=UTC&sslMode=REQUIRED&defaultAuthenticationPlugin=com.azure.identity.extensions.jdbc.mysql.AzureMysqlAuthenticationPlugin" + dbScheme: mysql + port: 3306 + auth: + username: + password: + secretRef: database-secrets + secretKey: openmetadata-database-password + databaseName: +``` +{% note %} + +In the above code snippet, the Database Credentials (Auth Password Kubernetes Secret) is still required and cannot be empty. Set it to dummy / random value. 
+ +{% /note %} + +Install / Upgrade your Helm Release with the following command - + +```bash +helm repo update open-metadata +helm upgrade --install openmetadata open-metadata/openmetadata --values +``` + +For further reference, checkout the official documentation available in the below links - + +- [MySQL](https://learn.microsoft.com/en-us/azure/developer/java/spring-framework/migrate-mysql-to-passwordless-connection?tabs=sign-in-azure-cli%2Cjava%2Capp-service) +- [PostgreSQL](https://learn.microsoft.com/en-us/azure/developer/java/spring-framework/migrate-postgresql-to-passwordless-connection?tabs=sign-in-azure-cli%2Cjava%2Capp-service%2Cassign-role-service-connector) diff --git a/openmetadata-docs/content/v1.5.x/deployment/ingestion/openmetadata.md b/openmetadata-docs/content/v1.5.x/deployment/ingestion/openmetadata.md index cc1062be2452..09776c1d72ce 100644 --- a/openmetadata-docs/content/v1.5.x/deployment/ingestion/openmetadata.md +++ b/openmetadata-docs/content/v1.5.x/deployment/ingestion/openmetadata.md @@ -119,8 +119,8 @@ openmetadata: ## Custom Airflow Installation {% note %} -- Note that the `openmetadata-ingestion` only supports Python versions 3.7, 3.8 and 3.9. -- The supported Airflow versions are 2.3, 2.4 and 2.5. From release 1.1.1 onwards, OpenMetadata will also support Airflow 2.6. +- Note that the `openmetadata-ingestion` only supports Python versions 3.7, 3.8, 3.9 and 3.10. +- The supported Airflow versions are 2.3, 2.4, 2.5, 2.6, and 2.7. Starting from release 1.5, OpenMetadata will support compatibility with Airflow versions up to 2.9. 
{% /note %} You will need to follow three steps: diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/admin-guide/how-to-ingest-metadata.md b/openmetadata-docs/content/v1.5.x/how-to-guides/admin-guide/how-to-ingest-metadata.md index c71aa02abbc8..c8e598c9fb67 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/admin-guide/how-to-ingest-metadata.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/admin-guide/how-to-ingest-metadata.md @@ -47,7 +47,17 @@ Let’s start with an example of fetching metadata from a database service, i.e. - Start by creating a service connection by clicking on **Settings** from the left nav bar. Navigate to the **Services** section, and click on **Databases**. Click on **Add New Service**. {% image - src="/images/v1.5/how-to-guides/admin-guide/connector1.jpg" + src="/images/v1.5/how-to-guides/admin-guide/connector1.png" + alt="Create a Service Connection" + caption="Create a Service Connection" + /%} +{% image + src="/images/v1.5/how-to-guides/admin-guide/connector1.1.png" + alt="Create a Service Connection" + caption="Create a Service Connection" + /%} +{% image + src="/images/v1.5/how-to-guides/admin-guide/connector1.2.png" alt="Create a Service Connection" caption="Create a Service Connection" /%} diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/automation/set-up-automation.md b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/automation/set-up-automation.md index 0b57f0a55b05..91a23c36f5dc 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/automation/set-up-automation.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/automation/set-up-automation.md @@ -27,11 +27,19 @@ caption="Add Automation" /%} ### Step 3: Fill in Automation Details + In the pop-up window, provide the necessary information to set up the automation: + - **Automation Name**: Give a meaningful name to the automation for easy identification. 
- **Description**: Add a brief description explaining what this automation will do (e.g., "Daily metadata ingestion for database XYZ"). -- **Logic/Conditions**: Define any conditions or specific criteria needed for this automation to work (e.g., specific tables or columns to be included). - Ensure that the logic is set up as per your specific requirements to make the automation useful for your workflows. +- **Logic/Conditions**: Define any conditions or specific criteria needed for this automation to work (e.g., specific tables or columns to be included). Ensure that the logic is set up as per your specific requirements to make the automation effective for your workflows. + +Additionally, use the **Apply to Child** option to add a list of **tags** and **glossary terms** to selected assets at the column level: + - If a list of columns is specified, tags will only be applied to columns with matching names. + - By default, incoming tags will merge with existing tags. + - To overwrite existing tags with the new list, select the **Overwrite Metadata** option, replacing any previous tags with the incoming ones. + +Ensure each setting aligns with your automation requirements to maximize efficiency. {% image src="/images/v1.5/how-to-guides/governance/automation-4.png" diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/classification/auto.md b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/classification/auto.md index 3563b4b463d7..f5ca7aa84c23 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/classification/auto.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/classification/auto.md @@ -7,7 +7,7 @@ slug: /how-to-guides/data-governance/classification/auto OpenMetadata identifies PII data and auto tags or suggests the tags. The data profiler automatically tags the PII-Sensitive data. 
The addition of tags about PII data helps consumers and governance teams identify data that needs to be treated carefully. -In the example below, the columns ‘user_name’ and ‘social security number’ are auto-tagged as PII-sensitive. This works using NLP as part of the profiler during ingestion. +In the example below, the columns ‘last_name’ and ‘social security number’ are auto-tagged as PII-sensitive. This works using NLP as part of the profiler during ingestion. {% image src="/images/v1.5/how-to-guides/governance/auto1.png" @@ -15,7 +15,7 @@ alt="User_name and Social Security Number are Auto-Classified as PII Sensitive" caption="User_name and Social Security Number are Auto-Classified as PII Sensitive" /%} -In the below example, the column ‘dwh_x10’ is also auto-tagged as PII Sensitive, even though the column name does not provide much information. +In the below example, the column ‘number_of_orders’ is also auto-tagged as PII Sensitive, even though the column name does not provide much information. {% image src="/images/v1.5/how-to-guides/governance/auto2.png" @@ -23,7 +23,7 @@ alt="Column Name does not provide much information" caption="Column Name does not provide much information" /%} -When we look at the content of the column ‘dwh_x10’ in the Sample Data tab, it becomes clear that the auto-classification is based on the data in the column. +When we look at the content of the column ‘number_of_orders’ in the Sample Data tab, it becomes clear that the auto-classification is based on the data in the column. 
{% image src="/images/v1.5/how-to-guides/governance/auto3.png" diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/glossary/glossary-term.md b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/glossary/glossary-term.md index 5cad5c28335f..e291cd8ee02b 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/glossary/glossary-term.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/data-governance/glossary/glossary-term.md @@ -64,7 +64,7 @@ caption="Glossary Terms Tab" ### Assets Tab -The **Assets Tab** displays all the assets that are associated with the glossary term. These data assets are further subgrouped as Tables, Topics, Dashboards. The right side panel shows a preview of the data assets selected. +The **Assets Tab** displays all the assets that are associated with the glossary term. These data assets are further subgrouped on the basis of databases. The right side panel shows a preview of the data assets selected. {% image src="/images/v1.5/how-to-guides/governance/term3.png" diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/data-insights/report.md b/openmetadata-docs/content/v1.5.x/how-to-guides/data-insights/report.md index d76a171d9353..6f4d616471cb 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/data-insights/report.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/data-insights/report.md @@ -22,8 +22,18 @@ caption="Data Insights Report" All the reports can be filtered by **Teams, Data Tiers, and a Time Filter**. 
{% image src="/images/v1.5/how-to-guides/insights/insights2.png" -alt="Data Insights Report Filters: Team, Tier, Time" -caption="Data Insights Report Filters: Team, Tier, Time" +alt="Data Insights Report Filter: Team" +caption="Data Insights Report Filter: Team" +/%} +{% image +src="/images/v1.5/how-to-guides/insights/insights2.1.png" +alt="Data Insights Report Filter: Tier" +caption="Data Insights Report Filter: Tier" +/%} +{% image +src="/images/v1.5/how-to-guides/insights/insights2.2.png" +alt="Data Insights Report Filter: Time" +caption="Data Insights Report Filter: Time" /%} ## Data Assets Report diff --git a/openmetadata-docs/content/v1.5.x/how-to-guides/data-lineage/workflow.md b/openmetadata-docs/content/v1.5.x/how-to-guides/data-lineage/workflow.md index 518ff39e782a..997c77b6dbda 100644 --- a/openmetadata-docs/content/v1.5.x/how-to-guides/data-lineage/workflow.md +++ b/openmetadata-docs/content/v1.5.x/how-to-guides/data-lineage/workflow.md @@ -24,13 +24,25 @@ Apart from the Metadata ingestion, we can create a workflow that will obtain the ### 1. Add a Lineage Ingestion -Navigate to **Settings >> Services**. Select the required service +Navigate to **Settings >> Services >> Databases**. Select the required service {% image src="/images/v1.5/how-to-guides/lineage/wkf1.png" alt="Select a Service" caption="Select a Service" /%} + {% image + src="/images/v1.5/how-to-guides/lineage/wkf1.1.png" + alt="Click on Databases" + caption="Click on Databases" + /%} + + {% image + src="/images/v1.5/how-to-guides/lineage/wkf1.2.png" + alt="Select the Database" + caption="Select the Database" + /%} + Go the the **Ingestions** tab. Click on **Add Ingestion** and select **Add Lineage Ingestion**. 
{% image src="/images/v1.5/how-to-guides/lineage/wkf2.png" diff --git a/openmetadata-docs/content/v1.5.x/menu.md b/openmetadata-docs/content/v1.5.x/menu.md index 478d3a68f683..48114d41e41e 100644 --- a/openmetadata-docs/content/v1.5.x/menu.md +++ b/openmetadata-docs/content/v1.5.x/menu.md @@ -187,6 +187,8 @@ site_menu: url: /deployment/rds-iam-auth - category: Deployment / How to enable Azure Database Auth url: /deployment/azure-auth + - category: Deployment / Azure - Enable Passwordless Database Backend Connection + url: /deployment/azure-passwordless-auth - category: Deployment / Server Configuration Reference url: /deployment/configuration - category: Deployment / Database Connection Pooling diff --git a/openmetadata-docs/content/v1.5.x/quick-start/local-docker-deployment.md b/openmetadata-docs/content/v1.5.x/quick-start/local-docker-deployment.md index 8c9346120fa6..ce637c764b48 100644 --- a/openmetadata-docs/content/v1.5.x/quick-start/local-docker-deployment.md +++ b/openmetadata-docs/content/v1.5.x/quick-start/local-docker-deployment.md @@ -65,26 +65,27 @@ Docker Compose version v2.1.1 Follow the instructions [here](https://docs.docker.com/compose/cli-command/#install-on-linux) to install docker compose version 2.0.0 1. Run the following command to download the current stable release of Docker Compose - ``` - DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker} - - mkdir -p $DOCKER_CONFIG/cli-plugins - curl -SL https://github.com/docker/compose/releases/download/v2.2.3/docker-compose-linux-x86_64 -o - $DOCKER_CONFIG/cli-plugins/docker-compose - ``` - + + ``` + DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker} + + mkdir -p $DOCKER_CONFIG/cli-plugins + curl -SL https://github.com/docker/compose/releases/download/v2.2.3/docker-compose-linux-x86_64 -o + $DOCKER_CONFIG/cli-plugins/docker-compose + ``` + This command installs Compose V2 for the active user under $HOME directory. 
To install Docker Compose for all users on your system, replace` ~/.docker/cli-plugins` with `/usr/local/lib/docker/cli-plugins`. 2. Apply executable permissions to the binary - ``` - chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose - ``` + ``` + chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose + ``` 3. Test your installation - ``` - docker compose version - > Docker Compose version v2.2.3 - ``` + ``` + docker compose version + > Docker Compose version v2.2.3 + ``` ## Windows @@ -97,7 +98,6 @@ Follow the instructions [here](https://docs.docker.com/compose/cli-command/#inst - After completion of the pre-requisites, please install `python3-pip` and `python3-venv` on your Ubuntu system. - Command: `apt install python3-pip python3-venv` (Ensure that you have the privilege to install packages, if not, please use Super User.) - ## Procedure ### 1. Create a directory for OpenMetadata @@ -113,21 +113,22 @@ mkdir openmetadata-docker && cd openmetadata-docker Download the docker-compose.yml file from the release page [here](https://github.com/open-metadata/OpenMetadata/releases/latest). The latest version is at the top of the page - - Deploying with MySQL: Download `docker-compose.yml` file from the above link. - - Deploying with PostgreSQL: Download `docker-compose-postgres.yml` file from the above link. + +- Deploying with MySQL: Download `docker-compose.yml` file from the above link. +- Deploying with PostgreSQL: Download `docker-compose-postgres.yml` file from the above link. 
You can use the curl or wget command as well to fetch the docker compose files from your terminal - ```commandline -curl -sL -o docker-compose.yml https://github.com/open-metadata/OpenMetadata/releases/download/1.5.8-release/docker-compose.yml +curl -sL -o docker-compose.yml https://github.com/open-metadata/OpenMetadata/releases/download/1.5.10-release/docker-compose.yml -curl -sL -o docker-compose-postgres.yml https://github.com/open-metadata/OpenMetadata/releases/download/1.5.8-release/docker-compose-postgres.yml +curl -sL -o docker-compose-postgres.yml https://github.com/open-metadata/OpenMetadata/releases/download/1.5.10-release/docker-compose-postgres.yml ``` ```commandline -wget https://github.com/open-metadata/OpenMetadata/releases/download/1.5.8-release/docker-compose.yml +wget https://github.com/open-metadata/OpenMetadata/releases/download/1.5.10-release/docker-compose.yml -wget https://github.com/open-metadata/OpenMetadata/releases/download/1.5.8-release/docker-compose-postgres.yml +wget https://github.com/open-metadata/OpenMetadata/releases/download/1.5.10-release/docker-compose-postgres.yml ``` ### 3. Start the Docker Compose Services @@ -137,7 +138,7 @@ Run the below command to deploy the OpenMetadata For OpenMetadata with MySQL Database - ```commandline -docker compose -f docker-compose.yml up --detach +docker compose -f docker-compose.yml up --detach ``` For OpenMetadata with PostgreSQL Database - @@ -149,6 +150,7 @@ docker compose -f docker-compose-postgres.yml up --detach These commands will pull the docker images of Openmetadata for MySQL / PostgreSQL, OpenMetadata-Server, OpenMetadata-Ingestion and Elasticsearch. Upon running this command you should see output similar to the following. + ```commandline +] Running 7/8 ⠿ Network metadata_app_net Created 0.2s @@ -166,10 +168,10 @@ You can validate that all containers are up by running with command `docker ps`. 
```commandline ❯ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -470cc8149826 openmetadata/server:1.5.8 "./openmetadata-star…" 45 seconds ago Up 43 seconds 3306/tcp, 9200/tcp, 9300/tcp, 0.0.0.0:8585-8586->8585-8586/tcp openmetadata_server -63578aacbff5 openmetadata/ingestion:1.5.8 "./ingestion_depende…" 45 seconds ago Up 43 seconds 0.0.0.0:8080->8080/tcp openmetadata_ingestion +470cc8149826 openmetadata/server:1.5.10 "./openmetadata-star…" 45 seconds ago Up 43 seconds 3306/tcp, 9200/tcp, 9300/tcp, 0.0.0.0:8585-8586->8585-8586/tcp openmetadata_server +63578aacbff5 openmetadata/ingestion:1.5.10 "./ingestion_depende…" 45 seconds ago Up 43 seconds 0.0.0.0:8080->8080/tcp openmetadata_ingestion 9f5ee8334f4b docker.elastic.co/elasticsearch/elasticsearch:7.16.3 "/tini -- /usr/local…" 45 seconds ago Up 44 seconds 0.0.0.0:9200->9200/tcp, 0.0.0.0:9300->9300/tcp openmetadata_elasticsearch -08947ab3424b openmetadata/db:1.5.8 "/entrypoint.sh mysq…" 45 seconds ago Up 44 seconds (healthy) 3306/tcp, 33060-33061/tcp openmetadata_mysql +08947ab3424b openmetadata/db:1.5.10 "/entrypoint.sh mysq…" 45 seconds ago Up 44 seconds (healthy) 3306/tcp, 33060-33061/tcp openmetadata_mysql ``` In a few seconds, you should be able to access the OpenMetadata UI at [http://localhost:8585](http://localhost:8585) @@ -197,6 +199,7 @@ via the UI. In the Airflow, you will also see some sample DAGs that will ingest sample data and serve as an example. You can access Airflow at [http://localhost:8080](http://localhost:8080). Use the following credentials to log in to Airflow. + - Username: `admin` - Password: `admin` @@ -266,7 +269,6 @@ installation. OpenMetadata. 3. Visit the [API](/swagger.html) documentation and explore the rich set of OpenMetadata APIs. 
- ### Volume Permissions: Operation not permitted If you are running on Windows (WSL2) and see permissions errors when starting the databases (either MySQL or Postgres), e.g., diff --git a/openmetadata-docs/content/v1.5.x/releases/releases/index.md b/openmetadata-docs/content/v1.5.x/releases/releases/index.md index 0b674cc0db46..feb0596eaaf9 100644 --- a/openmetadata-docs/content/v1.5.x/releases/releases/index.md +++ b/openmetadata-docs/content/v1.5.x/releases/releases/index.md @@ -14,6 +14,51 @@ version. To see what's coming in next releases, please check our [Roadmap](/rele {% partial file="/v1.5/releases/latest.md" /%} +# 1.5.9 Release + +{% note noteType="Tip" %} +**Oct 29th, 2024** +{% /note %} + +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.9-release). + +# What's Changed + +- Prepare App Framework to handle application limits +- Add Query Builder widget +- Revamp MetaPilot as Collate AI and add limits (Collate only) +- Fix EntityLink for names with brackets +- Fix backend database Azure auth +- Mask Greenplum secrets on the UI + +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.8-release...1.5.9-release + +# 1.5.8 Release + +{% note noteType="Tip" %} +**Oct 23rd, 2024** +{% /note %} + +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.8-release). + +# What's Changed + +- Minor: Add location path to a table entity. +- Minor: Do not include soft deleted assets in the Data Insight. +- Minor: Supported total unique user count on the Team page. +- Fix: Add Azure Token Base Authentication +- Fix: Hive Meta store connection issue. +- Fix: Issues in zh language search index mapping. +- Fix: Live index is on test suite creation. +- Fix: LocationPath Index. +- Fix: Mode dashboard ingestion API call. +- Fix: Mode test connection returns data in dict instead of JSON. +- Fix: Quicksight lineage source. 
+- Fix: Task deserialization in Airflow metadata ingestion. +- Fix: Web analytic activity being reset. + +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.7-release...1.5.8-release + # 1.5.7 Release {% note noteType="Tip" %} @@ -94,8 +139,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improved AlationSink connector. - Fixed sktime version to fix AUT - Fixed Expected ColumnLineage but got dict -- Improved Collate API with Knowledge Center routes. ${CollateIconWithLinkMD} - +- Improved Collate API with Knowledge Center routes. ${CollateIconWithLinkMD} **Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.5-release...1.5.6-release @@ -147,6 +191,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # What's Changed ## OpenMetadata + - Hotfix to the Term Aggregation size on Data Insights - ES pagination with error handling - Updated Domain in Docker Compose & Docs @@ -158,6 +203,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Unable to access the import glossary page ## Collate + - Fix token limitations using config - Fix Automator pagination - Fix MetaPilot push for no constraint @@ -227,17 +273,20 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # Backward Incompatible Changes ## Multi Owners + OpenMetadata allows a single user or a team to be tagged as owners for any data assets. In Release 1.5.1, we allow users to tag multiple individual owners or a single team. This will allow organizations to add ownership to multiple individuals without necessarily needing to create a team around them like previously. 
This is a backward incompatible change, if you are using APIs, please make sure the owner field is now changed to “owners” ## Import/Export Format + To support the multi-owner format, we have now changed how we export and import the CSV file in glossary, services, database, schema, table, etc. The new format will be user:userName;team:TeamName If you are importing an older file, please make sure to make this change. ## Pydantic V2 + The core of OpenMetadata are the JSON Schemas that define the metadata standard. These schemas are automatically translated into Java, Typescript, and Python code with Pydantic classes. In this release, we have [migrated](https://docs.pydantic.dev/latest/migration/) the codebase from Pydantic V1 to Pydantic V2. @@ -287,14 +336,17 @@ In the latest release, several updates and enhancements have been made to the JS - **Tableau**: Enhanced support by adding capabilities for connecting with `TableauPublishedDatasource` and `TableauEmbeddedDatasource`, providing more granular control over data visualization and reporting. ## Include DDL + During the Database Metadata ingestion, we can optionally pick up the DDL for both tables and views. During the metadata ingestion, we use the view DDLs to generate the View Lineage. To reduce the processing time for out-of-the-box workflows, we are disabling the include DDL by default, whereas before, it was enabled, which potentially led to long-running workflows. ## Secrets Manager + Starting with the release 1.5.1, the JWT Token for the bots will be sent to the Secrets Manager if you configured one. It won't appear anymore in your dag_generated_configs in Airflow. ## Python SDK + The `metadata insight` command has been removed. Since Data Insights application was moved to be an internal system application instead of relying on external pipelines the SDK command to run the pipeline was removed. 
# What's New @@ -319,11 +371,13 @@ We also have improved the Table Data quality dashboard to showcase the tests cat {% youtube videoId="bXcQBtZuyoU" start="0:00" end="2:10" width="560px" height="315px" /%} ## Freshness Data Quality Tests (Collate) + Working with old data can lead to making wrong decisions. With the new Freshness test, you can validate that your data arrives at the right time. Freshness tests are a critical part of any data team's toolset. Bringing these tests together with lineage information and the Incident Manager, your team will be able to quickly detect issues related to missing data or stuck pipelines. {% youtube videoId="QRcR3m9cCGo" start="0:00" end="1:09" width="560px" height="315px" /%} ## Data Diff Data Quality Tests + Data quality checks are important not only within a single table but also between different tables. These data diff checks can ensure key data remains unchanged after transformation, or conversely, ensure that the transformations were actually performed. We are introducing the **table difference data quality test** to validate that multiple appearances of the same information remain consistent. Note that the test allows you to specify which column to use as a key and which columns you want to compare, and even add filters in the data to give you more control over multiple use cases. @@ -331,11 +385,13 @@ We are introducing the **table difference data quality test** to validate that m {% youtube videoId="oxZVS_UGrE4" start="0:00" end="2:22" width="560px" height="315px" /%} ## Domains RBAC & Subdomains + OpenMetadata introduced Domains & Data Products in 1.3.0. Since then, many large organizations have started using Domains & Data Products to achieve better ownership and collaboration around domains that can span multiple teams. In the 1.5.1 release, we added support for subdomains. This will help teams to organize into multiple subdomains within each domain. 
### RBAC for Domains + With the 1.5.1 release, we are adding more stricter controls around Domain. Now, teams, data assets, glossaries, and classification can have domain concepts and can get a policy such that only users within a domain can access the data within a domain. Domain owners can use Data Products to publish data products and showcase publicly available data assets from a specific domain. This will help large companies to use a single OpenMetadata platform to unify all of their data and teams but also provide more stringent controls to segment the data between domains @@ -343,6 +399,7 @@ This will help large companies to use a single OpenMetadata platform to unify al {% youtube videoId="r-_HaewjgTQ" start="0:00" end="0:44" width="560px" height="315px" /%} ## Improved Explore Page & Data Asset Widget + OpenMetadata, with its simple UI/UX and data collaboration features, is becoming more attractive to non-technical users as well. Data Governance teams are using OpenMetadata to add glossary terms and policies around metadata. Teams using Collate SaaS product are taking advantage of our Automations feature to gain productivity in their governance tasks. Our new improved navigation on the Explore page will help users navigate hierarchically and find the data they are looking for. Users will see the data assets now grouped by `service name -> database -> schema -> tables/stored procedures`. @@ -352,11 +409,13 @@ We are also making the discovery of data more accessible for users introducing a {% youtube videoId="45ekUIRO1Ec" start="0:00" end="1:11" width="560px" height="315px" /%} ## Pipeline Status Widget + We are also adding another widget you can use to customize the Landing Page of the User Personas in your organization. With the Pipeline Status widget, Data Engineers can easily track the pipelines that are not behaving as expected. 
This widget, together with the obervability alerts that are already in place, will help your teams jump even faster to solving any issues in the platform. ## API as Data Asset + The Internet runs using APIs, both producing and consuming data. Organizations today run many microservices and REST APIs to capture data from their users and update a transaction database in the backend. On top of the many supported connectors across Databases, Dashboards, ML Models, etc. We believe that providing support for API Services as data assets will help to get the full picture of how the data is coming through from various services and landing into databases, going to warehouses and BI tools. @@ -366,20 +425,25 @@ In 1.5.1 we are introducing APIs as another first-class entity. Teams can now ca {% youtube videoId="b9wrVnM3u80" start="0:00" end="0:33" width="560px" height="315px" /%} ## Glossary Improvements + OpenMetadata supports multiple glossaries, an import/export and review process, and bulk asset tagging with glossary terms. Many teams are taking advantage of these features, and with an amazing open-source community, we are receiving great feedback on improving glossary functionality. Here are some of the improvements coming in 1.5.1: + 1. Glossary Reviewers can be teams 2. Updating a glossary will enforce a re-review 3. Renaming the Glossary Term while it's under review will keep the task associated with it open ## Data Insights (Collate) + The Data Insights application is meant to give you a quick glance of your data's state and allow you to take action based on the information you receive. To continue pursuing this objective, the application was completely refactored to allow customizability. This is achieved by the possibility of now creating custom dashboards. On this release you can create charts based on your data assets metadata based on your needs. ## Ingestion Connectors + 80+ connectors to help teams to centralize metadata. 
We continue to push the boundaries of this mission, in + - **Apache Flink** as a Pipeline Connector - **SAP ERP**, after a long and successful collaboration with our community and SAP experts - **Teradata** as a community contribution from [gbpy](https://github.com/gpby) to broaden the integration capabilities for enterprise-scale analytics and data management. @@ -393,7 +457,7 @@ To continue pursuing this objective, the application was completely refactored t **August 6th, 2024** {% /note %} -- Make `Include ddl` disabled by default +- Make `Include ddl` disabled by default - Made DDL configuration consistent with views - Fix user profile task listing. - Fix import/export UI flow ${CollateIconWithLinkMD}. @@ -543,11 +607,13 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta ## Backward Incompatible Changes ### Tooling + - Metadata Backup/Recovery is deprecated. No further support will be provided. - Users are advised to use database native tools to backup and store it in their object store for recovery. - `bootstrap/bootstrap_storage.sh` has been deprecated in favor of bootstrap/openmetadata-ops.sh ### UI + - Activity has been improved. New update specific cards display critical information such as data quality test case updates, description, tag update or removal. - For Lineage, the Expand All button has been removed. A new Layers button is introduced at the bottom left corner. With the Layers button, you can add Column Level Lineage or Data Observability details to your Lineage view. - View Definition is now renamed as Schema Definition. @@ -555,6 +621,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - For Classification, users can set classification to be mutually exclusive only at the time of creation. Once created, you cannot change it back to mutually non-exclusive or vice-versa. 
This is to prevent conflicts of adding multiple tags that belong to same classification and later turning the mutually exclusive flag back to true. ### API + - Table Schema's `ViewDefinition` is now renamed to `SchemaDefinition` to capture Tables' Create Schema. - Bulk Import API now creates entities if they are not present during the import. - Table's TestSuite is migrated to EntityReference. Previously it used to store entire payload of TestSuite. @@ -578,6 +645,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - APIs are available in OSS. ## Data Quality Improvements + {% youtube videoId="UNOHvBMVcYM" start="0:00" end="1:28" width="560px" height="315px" /%} - The Table schema page now shows the Data Quality tests for each column. @@ -588,17 +656,20 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support has been added for an empty string as a missing count. ## Data Profiler + - Implemented a global profiler configuration page, allowing admin to exclude certain metric computations for specific data types. - Added profiler support for Redshift complex types and DynamoDB. - Fixed an issue with performing sum operations for large values in profiler ingestion. - Fixed the histogram unit's issues with scientific notation. ## Incident Manager + - We now display a sample of failed rows for the latest failed test cases. Once the issue is resolved, the failed sample will be deleted. (Collate Only) - Fixed the Date time filter for the Incident Manager. - Notifications are sent for the tasks created by the Incident Manager. ## Lineage Improvements + https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Column Lineage Search @@ -623,10 +694,12 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Support has been added for dynamic tables. 
## Data Insights
+
- Previously, the data insights reports displayed only the percentage coverage of ownership and description. Now, users can drill down to view the data assets with no owner or description.
- Improved the UX for data insight filters.

## Cost Analysis (Collate Only)
+
- Lifecycle data for Cost Analysis has been implemented for BigQuery, Snowflake, and Redshift.

## Custom Theme

@@ -644,10 +717,12 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search

- Added a Data Quality Widget to list the summary of data quality tests belonging to a user or their team.

## Ingestion Performance Improvements
+
- Bigquery, Redshift, and Snowflake now support incremental metadata ingestions by scanning DML operations on the query history.
- Database Services now support parallelizing the metadata ingestion at each schema.

## Connectors
+
- Now supports a new connector for QlikCloud.
- New Kafka Connect connector
- We now parse complex protobuf schemas for Kafka
@@ -656,7 +731,7 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search
- Added an option to include or exclude paused pipelines in Airflow.
- Revamped SSL support to allow users to upload the required certificates directly in the UI.
- The character support has been enhanced for tag ingestion to include /.
-- In the Oracle connector, we rolled back to use all_ tables instead of dba_.
+- In the Oracle connector, we rolled back to use `all_` tables instead of `dba_`.
- Added support for Azure auth in Trino.
- For QlikSense, we have added an option to disable SSL validation.

@@ -667,25 +742,31 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search

- Custom Properties now allow linking other assets in the platform, such as Tables, Dashboards, etc. To enable this, create a Custom Property as an Entity Reference or Entity Reference List.

## Glossary
+
- The glossary term parent can now be changed from the Details page.
- On the data assets page, glossary terms are displayed by hierarchy. ## Alerts & Notification Improvements + - The Activity Feed provides more contextual information, removing the need to move to entity pages. - Alerts give more accurate information about the entity, as well as conversations and tasks. ## Localization + - Fixed localization issues in the confirmation logic for the delete function. - Fixed the search index language configuration. ## Roles + - Now, roles can be inherited from the user configuration in SSO. ## Search + - You can now filter by assets without a description or an owner. - Improved the match results for search results. ## Others + - The description is auto-expanded when the data asset has no data and has the space to accommodate a lengthy description. - User email IDs have been masked and are only visible to Admins. - Users can filter Queries by owner, tag, and creation date in the UI. @@ -696,7 +777,6 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Redundant scroll bars have been removed from the UI. - Improved the bot role binding to provide more control over which roles are passed to the system bots. - # 1.3.4 Release 🎉 {% note noteType="Tip" %} @@ -735,21 +815,25 @@ Learn how to upgrade your OpenMetadata instance to 1.3.3! You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.3.2-release). ## MetaPilot (Collate) + - New MetaPilot application shipped in preview mode. Try it out in the [Sandbox](https://sandbox.open-metadata.org/)! - Get automatically generated descriptions with GenAI. Now it’s easier than ever to document your data assets. - Chat with the MetaPilot and get SQL queries to help you extract relevant information from your data assets. - Let the MetaPilot help you understand and improve the queries used on your tables. ## Authentication Flow + - Added generic support for OIDC Authentication. This is SSO provider-agnostic. 
- You can now integrate Confidential Clients to manage the server authentication. - Now, the session renewal happens automatically in the backend. ## Data Quality + - Pagination support was added for the Data Quality tab for data assets. - Fixed an issue with execution summary timeout issue for the data quality test. ## Connectors + - New Bigtable connector. - Now, users can configure the external sample data storage path. - Added lineage support for Snowflake materialized view and masking policies. @@ -760,6 +844,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improve PATCH generation for array fields. ## Other Changes + - Avoid creating duplicated queries. - Speed up the server start time by moving the Secrets Manager Migration to the migration container. - Fixed the issue with the date filter for the Incident Manager. @@ -768,6 +853,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed an issue with search indexing. - Fixed the missing input field for conversation source for alerts and notifications. - Filter dashboards by a project on the Explore page. + --- **Full Changelog**: [link](https://github.com/open-metadata/OpenMetadata/compare/1.3.1-release...1.3.2-release) @@ -780,30 +866,36 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.3.1-release). {% /note %} - ## Knowledge Center (Collate) + - Supports drag and drop for the hierarchy of knowledge articles. - Enhanced the layout and loading experience of the knowledge page. ## Lineage + - When adding a new node in Lineage, the Display Name is supported in search. - Fixed the issues with displaying lineage from Metabase. ## Glossary + - Improved the automation of performance tests for Glossary. - Performance improvements to display a large Glossary. 
## Data Insights + - Data Insights report has been improved. - The cost Analysis report has been optimized. ## Notifications + - The format for Slack notifications has been improved. ## Custom Properties + - Added enum type support for custom properties. ## Connectors + - Now BigQuery connector supports Primary, Foreign, and Unique Constraints. It fetches the column description for views. - Captures the SQL query that powers a Tableau DataModel. - Azure Key Vault is supported as a Secrets Manager. @@ -813,6 +905,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed an issue with the service display name after ingestion. ## Other Changes + - The functionality for mutually exclusive tags has been disabled. - PodGC set up for Argo workflows to delete the pods from the Kubernetes environment on a successful run of the pods. - Fixed the issue with the display of the personal access token. @@ -823,11 +916,9 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed the issues with testing the email settings. - Fixed an issue with adding tags. - - # 1.3.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2024, February 5th** [OpenMetadata 1.3 Release - Intuitive Lineage UI, Data Observability Alerts, Data Quality Incident Manager, Custom Metrics for Profiler, Knowledge Center Improvements, and lots more](https://blog.open-metadata.org/openmetadata-release-1-3-ac801834ee80) @@ -893,12 +984,14 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Custom metrics can be created at Table and Column levels. ## Profiler and Data Quality + - The Profiler has been improved to support sample data ingestion without computing other metrics. - Admins can configure the profiler to fetch up to 10,000 rows of sample data. - Sample data can be stored in S3 buckets. 
- Refined the default time range on the test case results page, adjusting it from the Last 3 days to the Last 30 days for a more encompassing view. ## Connectors + - New Google Cloud Storage for storage services. (Collate) - New Alation connector to migrate metadata into Collate. (Collate) - New Iceberg, SAS Viya, and Doris connectors. @@ -913,6 +1006,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - We now extract table descriptions when ingesting metadata from Salesforce. ## Glossary + - Supports soft delete for the default glossaries in OpenMetadata. - Supports the creation of tasks to request tags or a description. - Only the Owner can edit the Glossary term. @@ -932,10 +1026,12 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - The Settings page UI has been revamped. ## Data Insights + - Cost Analysis expanded to support BigQuery & Redshift. (Collate) - Improved the Data Insights Report sent via email. ## Other Changes + - Announcements can be notified over email, Slack, or Teams. - Alerts are sent to a user when they are mentioned in a task or activity feed. - We have improved the display of search results for column matches. When searching for columns, the matched results will be displayed and highlighted in the Preview pane. @@ -951,7 +1047,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.2.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, October 26th** [OpenMetadata 1.2 Release - Domains, Data Products, Search Index, Stored Procedures, Glossary Approval Workflow, Customizable Landing Page, Applications, Knowledge Center, Cost Analysis, and lots more](https://blog.open-metadata.org/openmetadata-release-1-2-531f0e3c6d9a) @@ -970,10 +1066,12 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Assets can also be added as Data Products in a Domain. 
## Search Index + - Elasticsearch or Opensearch connectors can now bring in the search index metadata into OpenMetadata. - The connector will populate the index’s mapping, settings, and sample data. ## Stored Procedures + - Added support for Stored Procedures. - Snowflake, Redshift, and BigQuery connectors are updated to bring stored procedure metadata into OpenMetadata. - The metadata workflow will bring the Stored Procedures and parse their executions to extract lineage information. @@ -991,8 +1089,9 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Color coding helps to visually differentiate and identify the data assets, when glossary terms are added to them. ## OpenMetadata Browser Extension + - Updated the Chrome browser extension for OpenMetadata with the new UI. -- Added support for Databases, Database Schemas, Tables, Dashboards, Charts, Pipelines, and Topics. +- Added support for Databases, Database Schemas, Tables, Dashboards, Charts, Pipelines, and Topics. ## Build Automation Applications @@ -1004,11 +1103,13 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - We will continue to add new Applications in upcoming releases. ## Lineage + - Performance improvements made for lineage based on the new release of SQLfluff. - Added support for `UPDATE … FROM Snowflake` queries - Added column-level lineage support for `SELECT *` queries ## Connectors + - Greenplum connector is now supported. - Couchbase connector is now supported. - Azure Data Lake Storage is now supported. (Collate) @@ -1039,21 +1140,24 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.1.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, August 24th** {% /note %} ## Data Quality + - Added support for Postgres version 11.19. - Fixed MariaDB time column issues. ## Connectors + - Added JWT authentication support for Trino. - Fixed Snowflake connection test. 
- Fixed SageMaker ingestion. - Added external table support for BigQuery. ## UI Improvements + - Added Russian language support. - Supports Delete functionality for sample data. - Improved Schema page UX. @@ -1061,9 +1165,11 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed the version history list. ## Ingestion + - Improved performance when ingesting table constraints. ## Backend + - Improved Glossary import validations. - Fixed Test Suite migrations and naming. - Fixed Classification migration. @@ -1072,7 +1178,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.1.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, August 7th** {% /note %} @@ -1085,15 +1191,18 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support custom cron for schedule ingestion ## Data Quality + - Fix BigQuery, MSSQL, and Clickhouse profiling errors ## Ingestion + - Fixed Airflow lineage extraction. - Added support for Databricks complex columns comments. - Fixed Athena lineage and usage parameter validation. - Airflow Managed APIs now support Airflow 2.6 ## Connectors + - New [Qliksense](qlik.com) Connector. - Hive supports extracting metadata directly from the metastore to speed up the execution. Users whose metastore is not exposed can still run the extraction pointing to Hive. - Added Usage & Lineage connector for Trino. @@ -1102,14 +1211,14 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Added support for JSON fields in SingleStore. 
## Backend + - Bumped table and column names length - Aggregation Improvements for Search - Test Suite Improvements - # 1.1.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, June 30th** [OpenMetadata 1.1.0 Release - UI Overhaul, New Connectors, Improved Lineage Parsing, PII Masking, and lots more](https://blog.open-metadata.org/openmetadata-1-1-0-release-97c1fb603bcf) @@ -1158,84 +1267,97 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improved monitoring of the Pipeline Service Client health. Any status errors between the OpenMetadata server and the Pipeline Service Client are now surfaced in a Prometheus metric `pipelineServiceClientStatus_counter_total` - Added AWS OpenSearch client-specific support. This allows us to update the Elasticsearch version support up to 7.16. - # 1.0.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, April 25th** [OpenMetadata 1.0 Release - Improved Schemas & APIs, Ingestion Improvements, Storage Services, Dashboard Data Models, Auto PII Classification, Localization, and much more](https://blog.open-metadata.org/openmetadata-1-0-release-beb34762d916) {% /note %} ## APIs & Schema + - **Stabilized** and improved the Schemas and APIs. - The APIs are **backward compatible**. ## Ingestion + - Connecting to your data sources has never been easier. Find all the necessary **permissions** and **connection details** directly in the UI. - When testing the connection, we now have a comprehensive list of **validations** to let you know which pieces of metadata can be extracted with the provided configuration. - **Performance** improvements when extracting metadata from sources such as Snowflake, Redshift, Postgres, and dbt. - New **Apache Impala** connector. ## Storage Services + - Based on your [feedback](https://github.com/open-metadata/OpenMetadata/discussions/8124), we created a new service to extract metadata from your **cloud storage**. 
- The Data Lake connector ingested one table per file, which covered only some of the use cases in a Data Platform. With **Storage Services**, you can now present accurate metadata from your tables, even when **partitioned**. - The first implementation has been done on **S3**, and we will keep adding support for other sources in the upcoming releases. ## Dashboard Data Models + - Dashboard Services now support the concept of **Data Models**: data that can be directly defined and managed in the Dashboard tooling itself, e.g., LookML models in Looker. - Data Models will help us close the gap between engineering and business by providing all the necessary metadata from sources typically used and managed by analysts or business users. - The first implementation has been done for **Tableau** and **Looker**. ## Queries + - Improved UI for **SQL Queries**, with faster loading times and allowing users to **vote** for popular queries! - Users can now create and share a **Query** directly from the UI, linking it to multiple tables if needed. ## Localization + - In 1.0, we have added **Localization** support for OpenMetadata. - Now you can use OpenMetadata in **English**, **French**, **Chinese**, **Japanese**, **Portuguese**, and **Spanish**. ## Glossary + - New and Improved **Glossary UI** - Easily search for Glossaries and any Glossary Term directly in the **global search**. - Instead of searching and tagging their assets individually, users can add Glossary Terms to multiple **assets** from the Glossary UI. ## Auto PII Classification + - Implemented an automated way to **tag PII data**. - The auto-classification is an optional step of the **Profiler** workflow. We will analyze the column names, and if sample data is being ingested, we will run NLP models on top of it. ## Search + - **Improved Relevancy**, with added support for partial matches. - **Improved Ranking**, with most used or higher Tier assets at the top of the search. 
- Support for **Classifications** and **Glossaries** in the global search. ## Security + - **SAML** support has been added. - Added option to mask passwords in the API response except for the `ingestion-bot` by setting the environment variable `MASK_PASSWORDS_API=true`. More info [here](/deployment/security/enable-password-masking). - **Deprecation Notice**: **SSO** Service accounts for Bots will be deprecated. **JWT** authentication will be the preferred method for creating Bots. ## Lineage + - Enhanced Lineage UI to display a large number of **nodes (1000+)**. - Improved UI for **better navigation**. - Improved **SQL parser** to extract lineage in the Lineage Workflows. ## Chrome Browser Extension + - All the metadata is at your fingertips while browsing Looker, Superset, etc., with the OpenMetadata Chrome Browser Extension. - **Chrome extension** supports Google SSO, Azure SSO, Okta, and AWS Cognito authentication. - You can Install the Chrome extension from **Chrome Web Store**. ## Other Changes + - The **Explore page** cards will now display a maximum of **ten tags**. - **Entity names** support apostrophes. - The **Summary panel** has been improved to be consistent across the UI. 
# 0.13.3 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, March 30th** {% /note %} ## Ingestion Framework + - Datalake Avro & Json, JsonZip support - BigQuery Profiler Ingestion for all regions - Support for Snowflake Geometry Type @@ -1254,57 +1376,69 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support for DBT manifest V8 ## Roles & Policies + - A Non-Privileged user can add new 'Roles' to Teams - Fix Permissions API to consider the leaf nodes tags as well, example: table's column tags ## Search + - Improve Search Relevancy, by adding functional scoring and add ngram analyzer; - Enable search for entities using both name and displayName ## Security + - Enable LDAP configuration to be configured via environment variable - LDAP-s support connection without MTLS ## EntityName + - Relax data asset name restrictions to allow the special characters except "::" - Allow unicode character and digits in Entity ## Data Quality + - Fix column values between test # 0.13.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, January 30th** [OpenMetadata 0.13.2 Release - Improved SQL Lineage, Glossary Bulk Upload, Unified Tag Category API, Mutually Exclusive Tags, Chrome Extension, and lots more](https://blog.open-metadata.org/openmetadata-0-13-2-release-e32c0de93361) {% /note %} ## Improved SQL Lineage + - We have collaborated with the [sqllineage](https://github.com/reata/sqllineage) and [sqlfluff](https://www.sqlfluff.com/) communities - to improve the parsing capabilities of `sqllineage`. We'll continue to collaborate to ship further improvements in new releases. + to improve the parsing capabilities of `sqllineage`. We'll continue to collaborate to ship further improvements in new releases. ## New Glossary UI + - Moved from a tree view in the left panel to an easy to navigate list of the terms sorted alphabetically. - The term list shows the tags and descriptions in the cards. 
## Glossary Import & Export + - You can now export your Glossary data as a CSV file. - In the same way, you can now bulk upload terms to a Glossary by adding their details in a CSV file. - The import utility will validate the file and show you a preview of the elements that are going to be imported to OpenMetadata. ## Unified Tag Category API + - Renamed Tag Categories to Classification, a more widely used term. - Updated the API to conform with the rest of the specification. More info [here](https://github.com/open-metadata/OpenMetadata/issues/9259). ## Mutually Exclusive Tags + - When creating a Classification or a Glossary term, you can now make the tags to be mutually exclusive. - If tags are set to be mutually exclusive, you won't be able to set multiple tags from the same category in the same asset. ## EntityName + - Special characters ## Ingestion Framework + - Performance Improvements: We are now getting descriptions in batch, making connectors such as Redshift or Snowflake way faster! - The Oracle connector now ships with the Thick mode enabled. - AWS QuickSight fixes @@ -1314,36 +1448,41 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 0.13.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, December 20th** {% /note %} ## Profiler and Data Quality + - Freshness Metric has been introduced. Data freshness shows DML operations performed against a table and the number of rows affected. All this is displayed within the data profiler with filterable graphs. This is currently supported for BigQuery, Snowflake, and Redshift. - Support has been added for data quality tests on Data Lake. - UI has been improved to show table and column profile data on separate page. Legend is now selectable to filter for specific metrics ## Alerts and Notification + The logic for Notification Support has been improved. 
Users can define Alerts based on a Trigger (all data assets or a specific entity), Filters (events to consider), and Action (Slack, MS Teams, Email, Webhook) on where to send the alert. ## Ingestion -- Now, dbt has its own workflow. Previously, dbt was a part of metadata ingestion workflow. + +- Now, dbt has its own workflow. Previously, dbt was a part of metadata ingestion workflow. - Airflow Lineage Operator and the OpenMetadata Hook are now part of the ingestion package. Send Airflow metadata from your DAGs and safely store the OpenMetadata server connection directly in Airflow. - Multiple Databases (catalog) is now supported for the Databricks connector - Azure blob is now supported to backup your metadata into ## New Connectors + - OpenMetadata now supports Azure Datalake Storage Gen 2 ## General Improvements + - Users can update the description and tags for Topic Schema. Previously, the topic schemas were read-only. We now support Avro/Protobuf parsing and field level details for topic schemas. -- The layout for the Data Insight Report has been improved. We now display a line graph instead of a bar graph. The Most Viewed Data Assets are clickable to view the asset details page. +- The layout for the Data Insight Report has been improved. We now display a line graph instead of a bar graph. The Most Viewed Data Assets are clickable to view the asset details page. - Improvements have been made to Advanced Search. Now, when a filter is applied, the details of the filter selected are displayed for clarity. - On the Explore page UI, the Side Preview is now available for all data assets. Previously it was only displayed for tables. 
# 0.13.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, December 8th** [OpenMetadata 0.13.0 Release — Data Insights & KPIs, Lineage Traceability, Data Lake Profiler, Search Improvements, and lots more](https://blog.open-metadata.org/openmetadata-0-13-0-release-ac8ac5bd87c1) @@ -1352,58 +1491,70 @@ The logic for Notification Support has been improved. Users can define Alerts ba {% youtube videoId="oNbMnTW5AkE" start="0:00" end="7:51" width="560px" height="315px" /%} ## Data Insights and KPI + Data Insight allows admins to take an active approach in their metadata management. Data Insight provides a single-pane view of all the key metrics to best reflect the state of your data. Admins can define the Key Performance Indicators (KPIs) and set goals within OpenMetadata to work towards better documentation, ownership, and tiering. Alerts can be set against the KPIs to be received on a specified schedule. ## Lineage + The lineage UI has been transformed to enhance user experience. Users can get a holistic view of an entity from the Lineage tab. When an entity is selected, the UI displays end-to-end lineage traceability for the table and column levels. ## Profiler + With the OpenMetadata UI, users can now create and deploy profiling workflows for the Datalake connector, which supports AWS S3 and GCS ## SSO + Support for LDAP SSO has been added in this release ## Advance Search + Syntax Editor has been introduced for advanced search with And/Or conditions that help discover assets quickly ## New Connectors + - AWS SageMaker - AWS QuickSight - AWS Kinesis - Domo ## Messaging Service Schemas Improvements + Major enhancements have been made to how data is extracted from Kafka and Redpanda Messaging services. Previously, OpenMetadata extracted all the Topics in the messaging queue and also connected to the Schema Registry to get the Schemas. These schemas were taken as one payload and published to OpenMetadata. 
We now parse Avro and Protobuf Schemas to extract the fields. Now, users can document each of these fields within a schema by adding descriptions and tags. Users can search based on the fields in the Schema of a Topic. ## General Improvements + - Soft deleted entities can be restored. Currently, only the ML Models are not supported. - Soft deleted teams can be restored. When restoring a soft deleted parent team, the child teams will not be restored by default. # 0.12.3 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, November 18th** {% /note %} ## Bug Fixes + - User suggestion index mapping - Tag and Glossary terms caching # 0.12.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, October 20th** {% /note %} ## Ingestion + - Databricks lineage - Added support for Airflow version 2.2.2 as a workflow scheduler + ## Bug Fixes + - Support same table across different databases for the profiler # 0.12.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, October 3rd** {% /note %} @@ -1411,7 +1562,7 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan - User/Password signup and login - Email notifications for forgotten password and new user signed up -- Admin can add new users and send an email +- Admin can add new users and send an email ## ElasticSearch full re-index through UI @@ -1425,17 +1576,17 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan - We support ingesting DBT tags into OpenMetadata -## Bots Integration +## Bots Integration - Admins can create bots and their security mechanism from UI itself ## Bug Fixes -- Around 136 Features/Improvements/Tests made it into 0.12.1 release +- Around 136 Features/Improvements/Tests made it into 0.12.1 release # 0.12.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, September 7th** [OpenMetadata 0.12.0 Release](https://blog.open-metadata.org/openmetadata-0-12-0-release-1ac059700de4) 
@@ -1444,6 +1595,7 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan {% youtube videoId="tv3pyCLcJfQ" start="0:00" end="17:04" width="560px" height="315px" /%} ## Team Hierarchy + Prior releases supported a flat hierarchy of just Teams and Users. In 0.12, support has been added for the entire organizational hierarchy with Business Unit, Division, Department, and Groups. An organization from small to very large can now be modeled in OpenMetadata with this feature. ## Roles and Policies @@ -1476,18 +1628,18 @@ In 0.12, we’ve also streamlined the Notifications menu with two separate tabs Users can get timely updates about the metadata change events for all entities through APIs using webhooks. The webhook integration with Slack has been further improved in this release. -OpenMetadata also supports webhook integration to Microsoft Teams, just as it supports Slack. Users can choose to receive notifications for only the required entities by using event filters based on when an entity is created, updated, or deleted. +OpenMetadata also supports webhook integration to Microsoft Teams, just as it supports Slack. Users can choose to receive notifications for only the required entities by using event filters based on when an entity is created, updated, or deleted. ## Tasks In the 0.11 release, a request to add or update descriptions for data assets could be converted to a Task. In the 0.12 release, Tasks can be created based on requests to create or update tags. Also, a glossary term approval workflow can be converted to a Task. - ## Secret Management Store Interface In 0.12, we have completely revamped how that secret is stored, accessed, and by whom; by introducing a Secrets Manager Interface to communicate with any Key Management Store. The KMS will mediate between any OpenMetadata internal requirement and sensitive information. That way, users can choose to use the underlying database as KMS, or any external system. 
The OpenMetadata community has already added support for AWS Key Management Service and AWS SSM. ## Connectors + New connectors are an essential part of every release in OpenMetadata. We are introducing four new connectors in this release: - Redpanda is a Kafka API-compatible streaming data platform for developers that unifies historical and real-time data. OpenMetadata now supports Redpanda as a Messaging service, which allows users to document its topics and schemas. Refer to the Redpanda documentation for more info. @@ -1496,6 +1648,7 @@ New connectors are an essential part of every release in OpenMetadata. We are in - Apache NiFi automates the flow of data between systems. OpenMetadata now supports a NiFi connector as the third new pipeline service on this release. ## Lineage + We’ve enhanced the performance of workflows by having a separate workflow for Lineage and Usage. By using two workflows for computing specific pieces of information, we can effectively filter down the queries to extract lineage. During table usage ingestion, the tables retrieved successfully will be cached, so that there is no need to repeat the same calls multiple times as many queries would be referencing the same tables. @@ -1503,10 +1656,11 @@ Usage queries have been optimized. A result limit has been added to Usage queries. ## Global Settings -The OpenMetadata Settings dropdown menu has been transformed into a single, centralized Settings page for added convenience in viewing all the available options. The Global Settings comprises setting options for Team Members, Access based on Roles and Policies, Services, Data Quality, Collaboration, Custom Attributes, and Integrations for webhooks and bots. Admins can view or update settings for various services like Slack, MS Teams, Webhooks, etc from the Global Settings page. +The OpenMetadata Settings dropdown menu has been transformed into a single, centralized Settings page for added convenience in viewing all the available options. 
The Global Settings comprises setting options for Team Members, Access based on Roles and Policies, Services, Data Quality, Collaboration, Custom Attributes, and Integrations for webhooks and bots. Admins can view or update settings for various services like Slack, MS Teams, Webhooks, etc from the Global Settings page. ## UI/UX Improvements + The major UI UX improvements have been done around Roles and Policies and a Global Settings page. Quite a lot of tweaks have been made to the UI to improve the UX. When creating a new user or when a user is registering for the first time, the dropdown menu for Teams now displays an option to ‘Show All’ teams. Previously, we supported the display of only the first 10 teams. An option has also been provided to search and filter. @@ -1515,13 +1669,14 @@ Manage Tab has been replaced with the manage button on the UI. # 0.11.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, July 1st** [OpenMetadata 0.11.0 Release](https://blog.open-metadata.org/openmetadata-0-11-release-8b82c85636a) {% /note %} ## Data Collaboration - Tasks, Announcements, & Emojis + - Tasks have been introduced as an extension to the ability to create conversations and post replies. - Tasks can be created around descriptions for tables, pipelines, dashboards, and topics. - Users can Request a description, or even Suggest a new description and make edits to an existing description. @@ -1533,14 +1688,17 @@ Manage Tab has been replaced with the manage button on the UI. - Task owners can provide description or accept/reject suggestions and those tasks are automatically closed. ## Column Level Lineage + - Column level lineage API support has been added in the backend. - Supports table level and column level lineage from Snowflake, Redshift, and BigQuery. ## Custom Properties + - Now supports adding new types and extending entities when organizations need to capture custom metadata. 
- New types and custom fields can be added to entities either using API or in OpenMetadata UI. ## Advanced Search + - Users can search by column, schema, database, owner, tag, and service. - Users can search by multiple parameters to narrow down the search results. - Separate advanced search options are available for Tables, Topics, Dashboards, Pipelines, and ML Models. @@ -1548,11 +1706,13 @@ Manage Tab has been replaced with the manage button on the UI. - Entity specific search options are also available - table specific options include Column, Schema, and Database, pipeline specific options include Task, and dashboards specific option includes Chart. ## Glossary UI Updates + - The Glossary UI has been upgraded. - The arrangement to display the Summary, Related Terms, Synonyms, and References has been changed. - Reviewers are shown on the right panel with an option to add or remove existing reviewers. ## Profiler and Data Quality Improvements + - Seven additional data quality tests have been added as follows. - - tableColumnCountToBeBetween: Ensure the number of columns in your table stays within the expected range - - tableColumnNameToExist: Check that a specific column is in your table @@ -1566,10 +1726,12 @@ Manage Tab has been replaced with the manage button on the UI. - Developed a direct integration between Great Expectations and OpenMetadata. Now, you can add custom actions to your Great Expectations checkpoints file that will automatically ingest your data quality tests results into OpenMetadata at the end of your checkpoint file run. ## ML Models + - ML Model entities have been added to the UI. - Supports ingestion through the UI from MLflow. ## Connectors + - Five new connectors have been added - Airbyte, Mode, AWS Data Lake, Google Cloud Data Lake, and Apache Pinot. - DBT Cloud support was added and we now extract manifest and catalog files from API. - The ingestion scheduler now supports a minute level selection. 
@@ -1577,6 +1739,7 @@ Manage Tab has been replaced with the manage button on the UI. - The Looker connector now fetches the ‘Usage’ and ‘Access’ metadata for Dashboards and Charts. ## UI Improvements + - The OpenMetadata UI has a new layout. - In the Activity Feeds, the options to reply to a conversation, as well as to delete can now be found on hovering over the conversation. - Users can react with Emojis on the activity feeds, conversations and replies. @@ -1585,6 +1748,7 @@ Manage Tab has been replaced with the manage button on the UI. - A tooltip has been added to display the FQN on hover in the Activity Feed header. ## Other Changes + - Admin users define Roles and associate these roles to Teams. When a user picks a Team, the Role gets automatically assigned. - An option has been added to recreate a fresh index from the data available in Elasticsearch. - A simple webhook server has been added to the metadata command to register and listen to the metadata change events. @@ -1595,7 +1759,7 @@ Manage Tab has been replaced with the manage button on the UI. # 0.10.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, May 17th** {% /note %} @@ -1607,7 +1771,7 @@ Manage Tab has been replaced with the manage button on the UI. # 0.10.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, April 27th** [OpenMetadata 0.10.0 Release](https://blog.open-metadata.org/openmetadata-0-10-0-release-82c4f5533c3f) @@ -1615,7 +1779,7 @@ Manage Tab has been replaced with the manage button on the UI. ## Support for Database Schema -OpenMetadata supports databases, service name databases, and tables. We’ve added Database Schema as part of the FQN. +OpenMetadata supports databases, service name databases, and tables. We’ve added Database Schema as part of the FQN. For each external data source, we ingest the database, as well as the tables that are contained underneath the schemas. ## Support for Hard Delete @@ -1625,39 +1789,39 @@ and ingestion. 
Hard deleting an entity removes the entity and all of its relatio ## Deploy Ingestion from UI -OpenMetadata has refactored the service connections to simplify the ingestion jobs from both the ingestion framework +OpenMetadata has refactored the service connections to simplify the ingestion jobs from both the ingestion framework and the UI. We now use the pydantic models automatically generated from the JSON schemas for the connection definition. The ‘Add Service’ form is automatically generated in the UI based on the JSON schema specifications for the various connectors that are supported in OpenMetadata. ## Download dbt Manifest Files from Amazon S3 or Google Cloud Storage -Previously, when ingesting the models and lineage from dbt, we passed the path of the dbt manifest and catalog files -directly into the workflow. We’ve worked on improving the quality of life of dbt. Now, we can dynamically download -these files from Amazon S3 or Google Cloud Storage. This way we can have any other process to connect to the dbt, -extract the catalog, and put it into any cloud service. We just need the path name and workflow job details from the +Previously, when ingesting the models and lineage from dbt, we passed the path of the dbt manifest and catalog files +directly into the workflow. We’ve worked on improving the quality of life of dbt. Now, we can dynamically download +these files from Amazon S3 or Google Cloud Storage. This way we can have any other process to connect to the dbt, +extract the catalog, and put it into any cloud service. We just need the path name and workflow job details from the metadata extraction to be able to ingest metadata. ## JSON Schema based Connection Definition -Each service (database, dashboard, messaging, or pipeline service) has its own configuration specifications, with some -unique requirements for some services. 
Instead of the ad hoc definitions of the source module in Python for each +Each service (database, dashboard, messaging, or pipeline service) has its own configuration specifications, with some +unique requirements for some services. Instead of the ad hoc definitions of the source module in Python for each connector, we’ve worked on the full refactoring of the ingestion framework. We now use the pydantic models automatically generated from the JSON schemas for the connection definition. ## Airflow Rest APIs -The Airflow REST APIs have been refactored. With our API centric model, we are creating a custom airflow rest API +The Airflow REST APIs have been refactored. With our API centric model, we are creating a custom airflow rest API directly on top of Airflow using plugins. This passes the connection information to automatically generate all the dags and prepares handy methods to help us test the connection to the source before creating the service. ## UI Changes - The UI improvements are directed toward providing a consistent user experience. -- Hard Deletion of Entities: With the support for the hard deletion of entities, we can permanently delete tables, - topics, or services. When the entity is hard deleted, the entity and all its relationships are removed. +- Hard Deletion of Entities: With the support for the hard deletion of entities, we can permanently delete tables, + topics, or services. When the entity is hard deleted, the entity and all its relationships are removed. This generates an ‘EntityDeleted’ change event. -- Dynamic “Add Service” Forms: The ‘Add Service’ form is automatically generated in the UI based on the JSON +- Dynamic “Add Service” Forms: The ‘Add Service’ form is automatically generated in the UI based on the JSON schema specifications for the various connectors that are supported in OpenMetadata. - UI Support for Database Schema as part of FQN: The database schema has been introduced in the 0.10 release. 
All the entity pages now support Database Schema in the UI. @@ -1668,17 +1832,18 @@ and prepares handy methods to help us test the connection to the source before c - Add User: A user can be added from the Users page. ## Security Changes -- **Support Refresh Tokens for Auth0 and Okta SSO**: The JWT tokens generated by the SSO providers expire by default - in about an hour, making the user re-login often. In this release, we’ve added support for refresh tokens for Auth0 + +- **Support Refresh Tokens for Auth0 and Okta SSO**: The JWT tokens generated by the SSO providers expire by default + in about an hour, making the user re-login often. In this release, we’ve added support for refresh tokens for Auth0 and Okta SSO. The tokens are refreshed silently behind the scenes to provide an uninterrupted user experience. In future releases, we’ll continue to stabilize authentication and add refresh tokens for the other SSO providers. -- **Custom OIDC SSO**: OpenMetadata now supports integration with your custom-built OIDC SSO for authentication. +- **Custom OIDC SSO**: OpenMetadata now supports integration with your custom-built OIDC SSO for authentication. This is supported both on the front end for user authentication and on the ingestion side. - **Azure SSO**: Support has been added for Azure SSO on Airflow. # 0.9.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, March 10th** [OpenMetadata 0.9.0 Release](https://blog.open-metadata.org/openmetadata-0-9-0-release-8e7b93ab1882) @@ -1693,6 +1858,7 @@ and prepares handy methods to help us test the connection to the source before c - Table details - Click through on usage to see who or what services are using it, what queries are pulling from it. ## Data Quality + - Ability to create and monitor the test cases. - Data Quality Tests support with Json Schemas and APIs. - UI Integration to enable user to write tests and run them on Airflow. 
@@ -1702,10 +1868,11 @@ and prepares handy methods to help us test the connection to the source before c - Glossaries are a Controlled Vocabulary in an organization used to define the concepts and terminologies specific to a particular domain. - API & Schemas to support Glossary. -- UI support to add Glossary and Glossary Terms. +- UI support to add Glossary and Glossary Terms. - Support for using Glossary terms to annotate Entities and Search using Glossary Terms. ## Connectors + - Apache Iceberg - Azure SQL - Clickhouse @@ -1722,10 +1889,12 @@ and prepares handy methods to help us test the connection to the source before c - Amundsen, Import Metadata from Amundsen into OpenMetadata ## Lineage + - DataSource SQL Parsing support to extract Lineage - View Lineage support ## Pipeline + - Capture pipeline status as it happens ## Security @@ -1736,23 +1905,25 @@ and prepares handy methods to help us test the connection to the source before c # 0.8.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, January 22nd** [OpenMetadata 0.8.0 Release](https://blog.open-metadata.org/openmetadata-0-8-0-release-ca09bd2fbf54) {% /note %} ## Access Control Policies + - Design of Access Control Policies. - Provide Role based access control with community feedback. ## Eventing Webhook - Register webhooks to get metadata event notifications. 
-- Metadata Change Event integration into Slack and framework for integration into other services such as +- Metadata Change Event integration into Slack and framework for integration into other services such as Kafka or other Notification frameworks ## Connectors + - Delta Lake - Iceberg - PowerBI @@ -1760,25 +1931,29 @@ and prepares handy methods to help us test the connection to the source before c # 0.7.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, November 17th** [OpenMetadata 0.7.0 Release](https://blog.open-metadata.org/openmetadata-0-7-0-release-9f741b8d5089) {% /note %} ## UI - Activity Feed, Improved UX for Search + - Users will have access to Activity Feed of all the changes to the Metadata. - New and Improved UX for Search and Landing page. ## Support for Table Location + - Extract Location information from Glue, Redshift. - Show Location details on the Table Page. ## ElasticSearch Improvements + - Support SSL (including self-signed certs) enabled ElasticSearch. - New entities will be indexed into ElasticSearch directly ## Connectors + - Metabase - Apache Druid - Glue Improvements @@ -1787,88 +1962,104 @@ and prepares handy methods to help us test the connection to the source before c - Amundsen Import connector ## Other features + - Metadata Change Event integration into Slack and framework for integration into other services such as Kafka or other Notification frameworks - Delta Lake support, Databricks, Iceberg # 0.6.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, November 17th** [OpenMetadata 0.6.0 Release — Metadata Versioning, Events API, One-Click Ingestion, and more](https://blog.open-metadata.org/openmetadata-0-6-0-release-metadata-versioning-events-api-one-click-ingestion-and-more-4394c4f08e0b) {% /note %} ## Metadata Versioning and Eventing Framework + - Capture changes to Entity Metadata from source and user interactions as versions. 
- Versioned changes will be published as events for clients to consume to take actions on. ## Data Reliability + - Improvements to Data Reliability library. - Capture custom measurements through user provided SQL. ## Airflow APIs + - Airflow APIs to deploy DAGS and manage them. - UI integration to deploy ingestion workflows. ## Connectors + - AWS Glue - dbt - MariaDB # 0.5.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, October 19th** [OpenMetadata 0.5.0 Release is here — Lineage, Pipelines, Complex Types, Data Profiler and so much more](https://blog.open-metadata.org/openmetadata-0-5-0-1144a4000644) {% /note %} ## Support for Lineage + - Lineage related schemas and APIs. - Lineage metadata integration from AirFlow for tables. - UI changes to show lineage information to the users. ## Data Reliability + - Improvements to Data Profiler. - UI integration with Data Profiler to show how the table profile looks over the period of time. ## Complex Types + - Support complex types such as Struct, Array with nested fields. - UI support to add expand complex types and tag, add description for nested fields. ## Connectors + - Trino - Redash ## Other features + - Pipeline Entities are supported. - Integration with Airflow to extract Pipeline details. # 0.4.0 Release -{% note noteType="Tip" %} + +{% note noteType="Tip" %} **2021, September 20th** [OpenMetadata 0.4.0 Release — Dashboards, Topics, Data Reliability](https://blog.open-metadata.org/openmetadata-0-4-0-release-dashboards-topics-data-reliability-14e8672ae0f5) {% /note %} ## Support for Kafka (and Pulsar WIP) + - Support for Message Service and Topic entities in schemas, APIs, and UI. - Kafka connector and ingestion support for Confluent Schema Registry. ## Support for Dashboards + - Support for Dashboard services, Dashboards, and Charts entities in schemas, APIs, and UI. - Looker, Superset, Tableau connector, and ingestion support. 
## User Interface + - Sort search results based on Usage, Relevance, and Last updated time. - Search string highlighted in search results. - Support for Kafka and Dashboards from Looker, Superset, and Tableau. ## Other features + - Pluggable SSO integration - Auth0 support. - Support for Presto. ## Work in progress + - Salesforce CRM connector. - Data profiler to profile tables in ingestion framework and show it table details page. diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md index 85210411b009..9ce3150435d6 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/collate-menu.md @@ -181,6 +181,12 @@ site_menu: url: /connectors/database/sap-hana - category: Connectors / Database / SAP Hana / Run Externally url: /connectors/database/sap-hana/yaml + - category: Connectors / Database / SAP ERP + url: /connectors/database/sap-erp + - category: Connectors / Database / SAP ERP / Run Externally + url: /connectors/database/sap-erp/yaml + - category: Connectors / Database / SAP ERP / Setup SAP ERP APIs + url: /connectors/database/sap-erp/setup-sap-apis - category: Connectors / Database / SAS url: /connectors/database/sas - category: Connectors / Database / SAS / Run Externally diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/index.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/index.md new file mode 100644 index 000000000000..13d1129b77b1 --- /dev/null +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/index.md @@ -0,0 +1,67 @@ +--- +title: SAP ERP +slug: /connectors/database/sap-erp +--- + +{% connectorDetailsHeader +name="SAP ERP" +stage="PROD" +platform="OpenMetadata" +availableFeatures=["Metadata", "dbt"] +unavailableFeatures=["Query Usage", "Stored Procedures", "Owners", "Tags","Data Profiler", "Data Quality", "View Lineage", "View 
Column-level Lineage"] +/ %} + + +In this section, we provide guides and references to use the SAP ERP connector. + +Configure and schedule SAP ERP metadata workflow from the OpenMetadata UI: + +- [Requirements](#requirements) +- [Metadata Ingestion](#metadata-ingestion) +- [dbt Integration](/connectors/ingestion/workflows/dbt) + +{% partial file="/v1.5/connectors/ingestion-modes-tiles.md" variables={yamlPath: "/connectors/database/sap-erp/yaml"} /%} + +## Requirements + +To ingest the SAP ERP metadata, CDS Views and OData services need to be setup to efficiently expose SAP data. To achieve this, data must be exposed via RESTful interfaces. +Follow the guide [here](/connectors/database/sap-erp/setup-sap-apis) to setup the APIs. + +## Metadata Ingestion + +{% partial + file="/v1.5/connectors/metadata-ingestion-ui.md" + variables={ + connector: "SAP ERP", + selectServicePath: "/images/v1.5/connectors/sap-erp/select-service.png", + addNewServicePath: "/images/v1.5/connectors/sap-erp/add-new-service.png", + serviceConnectionPath: "/images/v1.5/connectors/sap-erp/service-connection.png", +} +/%} + +{% stepsContainer %} +{% extraContent parentTagName="stepsContainer" %} + +#### Connection Details + +- **Host and Port**: This parameter specifies the host and port of the SAP ERP instance. This should be specified as a string in the format `https://hostname.com`. +- **API Key**: Api Key to authenticate the SAP ERP Apis. +- **database**: Optional name to give to the database in OpenMetadata. If left blank, we will use `default` as the database name. +- **databaseSchema**: Optional name to give to the database schema in OpenMetadata. If left blank, we will use `default` as the database schema name. +- **paginationLimit**: Pagination limit used while querying the SAP ERP APIs for fetching the entities. 
+
+{% partial file="/v1.5/connectors/database/advanced-configuration.md" /%}
+
+{% /extraContent %}
+
+{% partial file="/v1.5/connectors/test-connection.md" /%}
+
+{% partial file="/v1.5/connectors/database/configure-ingestion.md" /%}
+
+{% partial file="/v1.5/connectors/ingestion-schedule-and-deploy.md" /%}
+
+{% /stepsContainer %}
+
+{% partial file="/v1.5/connectors/troubleshooting.md" /%}
+
+{% partial file="/v1.5/connectors/database/related.md" /%}
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/setup-sap-apis.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/setup-sap-apis.md
new file mode 100644
index 000000000000..19b17292cb39
--- /dev/null
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/setup-sap-apis.md
@@ -0,0 +1,147 @@
+---
+title: Setup SAP ERP APIs
+slug: /connectors/database/sap-erp/setup-sap-apis
+---
+# Setup SAP ERP APIs
+
+In this section, we provide guides and references to set up the SAP ERP APIs needed for the connector.
+
+This document details the integration of Open Metadata with SAP systems, emphasizing the use of CDS Views and OData services to efficiently expose SAP data. To achieve this, data must be exposed via RESTful interfaces. Key concepts include:
+
+- **SAP Gateway**: A software component that bridges RFC and RESTful interfaces.
+- **RAP (Restful Application Programming)**: A coding framework designed to expose services via RESTful interfaces.
+- **CDS (Core Data Services)**: A layer that describes data objects and annotates them with desired functionalities, which are converted into code upon activation.
+- **OData V2 or V4**: A RESTful standard that simplifies interaction with database backends.
+
+
+## Steps
+### 1. ABAP Development Tools (ADT)
+Using the Eclipse based [ABAP Development Tools (ADT)](https://tools.hana.ondemand.com/#abap) the Restful interfaces are built.
+
+### 2. 
CDS Views +After creating a new ABAP Project for the connected SAP system, a new Data Definition object is to be created. + +{% image +src="/images/v1.5/connectors/sap-erp/data-definition-object.png" +alt="Data Definition Object" +caption="Create data definition object" /%} + +- Create the first view that gets the table metadata +```sql +@AbapCatalog.sqlViewName: 'ZZ_I_DDIC_TAB_CDS' +@AbapCatalog.compiler.compareFilter: true +@AbapCatalog.preserveKey: true +@AccessControl.authorizationCheck: #CHECK +@EndUserText.label: 'Open Metadata Tables' +define view ZZ_I_DDIC_TAB_CDS as select from dd02l l +left outer join dd02t t on + (l.tabname = t.tabname and l.as4local = t.as4local and l.as4vers = t.as4vers and t.ddlanguage = 'E') +{ +key l.tabname, +key l.as4local, +key l.as4vers, + @ObjectModel.readOnly: true + 'E' as LANG, + @ObjectModel.readOnly: true + l.tabclass, + @ObjectModel.readOnly: true + l.sqltab, + @ObjectModel.readOnly: true + l.applclass, + @ObjectModel.readOnly: true + l.authclass, + @ObjectModel.readOnly: true + l.as4date, + @ObjectModel.readOnly: true + l.as4time, + @ObjectModel.readOnly: true + l.masterlang, + @ObjectModel.readOnly: true + t.ddtext +} +``` + +- Then create the second view for table columns +```sql +@AbapCatalog.sqlViewName: 'ZZ_I_DDIC_COL_CDS_V' +@AbapCatalog.compiler.compareFilter: true +@AbapCatalog.preserveKey: true +@AccessControl.authorizationCheck: #CHECK +@EndUserText.label: 'Open Metadata Column' +define view ZZ_I_DDIC_COL_CDS as select from dd03l l +left outer join dd03t t on + (l.tabname = t.tabname and l.fieldname = t.fieldname and l.as4local = t.as4local and t.ddlanguage = 'E') +{ +key l.tabname, +key l.fieldname, +key l.as4local, +key l.as4vers, +key l.position as POS, + @ObjectModel.readOnly: true + 'E' as LANG, + @ObjectModel.readOnly: true + l.keyflag, + @ObjectModel.readOnly: true + l.mandatory, + @ObjectModel.readOnly: true + l.checktable, + @ObjectModel.readOnly: true + l.inttype, + @ObjectModel.readOnly: true + 
l.intlen, + @ObjectModel.readOnly: true + l.reftable, + @ObjectModel.readOnly: true + l.precfield, + @ObjectModel.readOnly: true + l.reffield, + @ObjectModel.readOnly: true + l.notnull, + @ObjectModel.readOnly: true + l.datatype, + @ObjectModel.readOnly: true + l.leng, + @ObjectModel.readOnly: true + l.decimals, + @ObjectModel.readOnly: true + l.domname, + @ObjectModel.readOnly: true + l.comptype, + @ObjectModel.readOnly: true + l.reftype, + @ObjectModel.readOnly: true + t.ddtext +} +where l.as4local = 'A' +``` +### 3. SAP Gateway +Using the transaction `/nsegw` in SAPGUI, open the configuration screen for the SAP Gateway and create a new project with default project type. + +{% image +src="/images/v1.5/connectors/sap-erp/create-project.png" +alt="Create Project" +caption="Create Project" /%} + +Create a reference to the CDS views under Data Model and import the views. This is all that is needed to configure the OData details thanks to the CDS view annotations. + +{% image +src="/images/v1.5/connectors/sap-erp/add-reference.png" +alt="Add Reference" +caption="Add Reference" /%} + +The final step is to expose the generated code as OData service. This is the Register step. + +{% image +src="/images/v1.5/connectors/sap-erp/register-odata-service.png" +alt="Register odata Service" +caption="Register odata Service" /%} + +In the next screen click on Add Service and add the service as new OData endpoint. The service alias is the location where the SAP Gateway is installed. 
+ +{% image +src="/images/v1.5/connectors/sap-erp/add-service-as-endpoint.png" +alt="Add Service As Endpoint" +caption="Add Service As Endpoint" /%} + + + diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/yaml.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/yaml.md new file mode 100644 index 000000000000..46d703d5a6dd --- /dev/null +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/connectors/database/sap-erp/yaml.md @@ -0,0 +1,198 @@ +--- +title: Run the SAP ERP Connector Externally +slug: /connectors/database/sap-erp/yaml +--- + +{% connectorDetailsHeader +name="SAP ERP" +stage="PROD" +platform="OpenMetadata" +availableFeatures=["Metadata", "dbt"] +unavailableFeatures=["Query Usage", "Stored Procedures", "Owners", "Tags","Data Profiler", "Data Quality", "View Lineage", "View Column-level Lineage"] +/ %} + +In this section, we provide guides and references to use the SAP ERP connector. + +Configure and schedule SAP ERP metadata workflow externally: + +- [Requirements](#requirements) +- [Metadata Ingestion](#metadata-ingestion) +- [dbt Integration](#dbt-integration) + +{% partial file="/v1.5/connectors/external-ingestion-deployment.md" /%} + +## Requirements + +{%inlineCallout icon="description" bold="OpenMetadata 1.1 or later" href="/deployment"%} +To deploy OpenMetadata, check the Deployment guides. +{%/inlineCallout%} + +To ingest the SAP ERP metadata, CDS Views and OData services need to be setup to efficiently expose SAP data. To achieve this, data must be exposed via RESTful interfaces. +Follow the guide [here](/connectors/database/sap-erp/setup-sap-apis) to setup the APIs. + + +### Python Requirements + +{% partial file="/v1.5/connectors/python-requirements.md" /%} + +To run the SAP ERP ingestion, you will need to install: + +```bash +pip3 install "openmetadata-ingestion[sap-erp]" +``` + +## Metadata Ingestion + +All connectors are defined as JSON Schemas. 
+[Here](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/sapErpConnection.json) +you can find the structure to create a connection to SAP ERP. + +In order to create and run a Metadata Ingestion workflow, we will follow +the steps to create a YAML configuration able to connect to the source, +process the Entities if needed, and reach the OpenMetadata server. + +The workflow is modeled around the following +[JSON Schema](https://github.com/open-metadata/OpenMetadata/blob/main/openmetadata-spec/src/main/resources/json/schema/metadataIngestion/workflow.json) + +### 1. Define the YAML Config + +This is a sample config for SAP ERP: + +{% codePreview %} + +{% codeInfoContainer %} + +#### Source Configuration - Service Connection + +{% codeInfo srNumber=1 %} + +**hostPort**: Host and port of the SAP ERP service. This specifies the host and port of the SAP ERP instance. It should be specified as a string in the format `https://hostname.com`. + +{% /codeInfo %} + +{% codeInfo srNumber=2 %} + +**apiKey**: Api Key to authenticate the SAP ERP Apis + +{% /codeInfo %} + +{% codeInfo srNumber=3 %} + +**databaseName**: In OpenMetadata, the Database Service hierarchy works as follows: +`Database Service > Database > Schema > Table` +In the case of SAP ERP, we won't have a Database as such. If you'd like to see your data in a database named something other than `default`, you can specify the name in this field. + +{% /codeInfo %} + +{% codeInfo srNumber=4 %} + +**databaseSchema**: In OpenMetadata, the Database Service hierarchy works as follows: +`Database Service > Database > Schema > Table` +In the case of SAP ERP, we won't have a Database Schema as such. If you'd like to see your data in a database schema named something other than `default`, you can specify the name in this field. 
+ +{% /codeInfo %} + +{% codeInfo srNumber=5 %} + +**paginationLimit**: Pagination limit used while querying the SAP ERP API for fetching the entities. + +{% /codeInfo %} + +{% partial file="/v1.5/connectors/yaml/database/source-config-def.md" /%} + +{% partial file="/v1.5/connectors/yaml/ingestion-sink-def.md" /%} + +{% partial file="/v1.5/connectors/yaml/workflow-config-def.md" /%} + +#### Advanced Configuration + +{% codeInfo srNumber=7 %} + +**Connection Options (Optional)**: Enter the details for any additional connection options that can be sent to database during the connection. These details must be added as Key-Value pairs. + +{% /codeInfo %} + +{% codeInfo srNumber=8 %} + +**Connection Arguments (Optional)**: Enter the details for any additional connection arguments such as security or protocol configs that can be sent to database during the connection. These details must be added as Key-Value pairs. + +- In case you are using Single-Sign-On (SSO) for authentication, add the `authenticator` details in the Connection Arguments as a Key-Value pair as follows: `"authenticator" : "sso_login_url"` + +{% /codeInfo %} + +{% /codeInfoContainer %} + +{% codeBlock fileName="filename.yaml" %} + +```yaml {% isCodeBlock=true %} +source: + type: SapErp + serviceName: + serviceConnection: + config: + type: SapErp +``` +```yaml {% srNumber=1 %} + hostPort: https://localhost.com +``` +```yaml {% srNumber=2 %} + apiKey: api_key +``` +```yaml {% srNumber=3 %} + databaseName: databaseName +``` +```yaml {% srNumber=4 %} + databaseSchema: databaseSchema +``` +```yaml {% srNumber=5 %} + paginationLimit: 100 +``` +```yaml {% srNumber=9 %} + # sslConfig: + # caCertificate: "path/to/ca/certificate" + # sslMode: disable #allow prefer require verify-ca verify-full +``` +```yaml {% srNumber=6 %} + # connectionOptions: + # key: value +``` +```yaml {% srNumber=7 %} + # connectionArguments: + # key: value +``` + +{% partial file="/v1.5/connectors/yaml/database/source-config.md" /%} + 
+{% partial file="/v1.5/connectors/yaml/ingestion-sink.md" /%}
+
+{% partial file="/v1.5/connectors/yaml/workflow-config.md" /%}
+
+{% /codeBlock %}
+
+{% /codePreview %}
+
+{% partial file="/v1.5/connectors/yaml/ingestion-cli.md" /%}
+
+## Securing SAP ERP Connection with SSL in OpenMetadata
+
+To configure SSL for secure connections between OpenMetadata and the SAP ERP instance, the SAP ERP connector supports various SSL modes, each providing different levels of connection security.
+
+When running the ingestion process externally, specify the SSL mode to be used for the SAP ERP connection, such as `prefer`, `verify-ca`, `allow`, and others. Once you've chosen the SSL mode, provide the CA certificate for SSL validation (`caCertificate`). Only the CA certificate is required for SSL validation for SAP ERP.
+
+```yaml
+  sslMode: disable #allow prefer require verify-ca verify-full
+  sslConfig:
+    caCertificate: "/path/to/ca/certificate"
+```
+
+## dbt Integration
+
+{% tilesContainer %}
+
+{% tile
+  icon="mediation"
+  title="dbt Integration"
+  description="Learn more about how to ingest dbt models' definitions and their lineage."
+  link="/connectors/ingestion/workflows/dbt" /%}
+
+{% /tilesContainer %}
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/deployment/ingestion/openmetadata.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/deployment/ingestion/openmetadata.md
index cc1062be2452..aea8c98b8457 100644
--- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/deployment/ingestion/openmetadata.md
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/deployment/ingestion/openmetadata.md
@@ -119,8 +119,8 @@ openmetadata:
 ## Custom Airflow Installation
 
 {% note %}
-- Note that the `openmetadata-ingestion` only supports Python versions 3.7, 3.8 and 3.9.
-- The supported Airflow versions are 2.3, 2.4 and 2.5. From release 1.1.1 onwards, OpenMetadata will also support Airflow 2.6.
+- Note that the `openmetadata-ingestion` only supports Python versions 3.7, 3.8, 3.9 and 3.10. 
+- The supported Airflow versions are 2.3, 2.4, 2.5, 2.6, and 2.7. Starting from release 1.5, OpenMetadata will support compatibility with Airflow versions up to 2.9.
 {% /note %}
 
 You will need to follow three steps:
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/admin-guide/how-to-ingest-metadata.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/admin-guide/how-to-ingest-metadata.md
index c71aa02abbc8..c8e598c9fb67 100644
--- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/admin-guide/how-to-ingest-metadata.md
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/admin-guide/how-to-ingest-metadata.md
@@ -47,7 +47,17 @@ Let’s start with an example of fetching metadata from a database service, i.e.
 - Start by creating a service connection by clicking on **Settings** from the left nav bar. Navigate to the **Services** section, and click on **Databases**. Click on **Add New Service**.
 
 {% image
-    src="/images/v1.5/how-to-guides/admin-guide/connector1.jpg"
+    src="/images/v1.5/how-to-guides/admin-guide/connector1.png"
+    alt="Create a Service Connection"
+    caption="Create a Service Connection"
+    /%}
+{% image
+    src="/images/v1.5/how-to-guides/admin-guide/connector1.1.png"
+    alt="Create a Service Connection"
+    caption="Create a Service Connection"
+    /%}
+{% image
+    src="/images/v1.5/how-to-guides/admin-guide/connector1.2.png"
     alt="Create a Service Connection"
     caption="Create a Service Connection"
     /%}
diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/automation/set-up-automation.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/automation/set-up-automation.md
index a4afc9e61f46..a7b5e1f74718 100644
--- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/automation/set-up-automation.md
+++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/automation/set-up-automation.md
@@ -27,11 +27,19 @@
 caption="Add Automation"
 /%}
### Step 3: Fill in Automation Details + In the pop-up window, provide the necessary information to set up the automation: + - **Automation Name**: Give a meaningful name to the automation for easy identification. - **Description**: Add a brief description explaining what this automation will do (e.g., "Daily metadata ingestion for database XYZ"). -- **Logic/Conditions**: Define any conditions or specific criteria needed for this automation to work (e.g., specific tables or columns to be included). - Ensure that the logic is set up as per your specific requirements to make the automation useful for your workflows. +- **Logic/Conditions**: Define any conditions or specific criteria needed for this automation to work (e.g., specific tables or columns to be included). Ensure that the logic is set up as per your specific requirements to make the automation effective for your workflows. + +Additionally, use the **Apply to Child** option to add a list of **tags** and **glossary terms** to selected assets at the column level: + - If a list of columns is specified, tags will only be applied to columns with matching names. + - By default, incoming tags will merge with existing tags. + - To overwrite existing tags with the new list, select the **Overwrite Metadata** option, replacing any previous tags with the incoming ones. + +Ensure each setting aligns with your automation requirements to maximize efficiency. 
{% image src="/images/v1.6/how-to-guides/governance/automation-4.png" diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/classification/auto.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/classification/auto.md index 3563b4b463d7..d91cf401005f 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/classification/auto.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/classification/auto.md @@ -7,7 +7,7 @@ slug: /how-to-guides/data-governance/classification/auto OpenMetadata identifies PII data and auto tags or suggests the tags. The data profiler automatically tags the PII-Sensitive data. The addition of tags about PII data helps consumers and governance teams identify data that needs to be treated carefully. -In the example below, the columns ‘user_name’ and ‘social security number’ are auto-tagged as PII-sensitive. This works using NLP as part of the profiler during ingestion. +In the example below, the columns ‘last_name’ and ‘social security number’ are auto-tagged as PII-sensitive. This works using NLP as part of the profiler during ingestion. {% image src="/images/v1.5/how-to-guides/governance/auto1.png" @@ -15,7 +15,7 @@ alt="User_name and Social Security Number are Auto-Classified as PII Sensitive" caption="User_name and Social Security Number are Auto-Classified as PII Sensitive" /%} -In the below example, the column ‘dwh_x10’ is also auto-tagged as PII Sensitive, even though the column name does not provide much information. +In the below example, the column ‘number_of_orders’ is also auto-tagged as Sensitive, even though the column name does not provide much information. 
{% image src="/images/v1.5/how-to-guides/governance/auto2.png" @@ -23,7 +23,7 @@ alt="Column Name does not provide much information" caption="Column Name does not provide much information" /%} -When we look at the content of the column ‘dwh_x10’ in the Sample Data tab, it becomes clear that the auto-classification is based on the data in the column. +When we look at the content of the column ‘number_of_orders’ in the Sample Data tab, it becomes clear that the auto-classification is based on the data in the column. {% image src="/images/v1.5/how-to-guides/governance/auto3.png" diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/glossary/glossary-term.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/glossary/glossary-term.md index 5cad5c28335f..f048d0e99014 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/glossary/glossary-term.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-governance/glossary/glossary-term.md @@ -31,7 +31,7 @@ The glossary term can include additional information as follows: - **Assets** - After creating a glossary term, data assets can be associated with the term, which helps in data discovery. {% image -src="/images/v1.5/how-to-guides/governance/glossary-term.png" +src="/images/v1.6/how-to-guides/governance/glossary-term.png" alt="Glossary Term Requirements" caption="Glossary Term Requirements" /%} @@ -47,7 +47,7 @@ The details of a Glossary Term in OpenMetadata are displayed in three tabs: Over The **Overview tab** displays the details of the term, along with the synonyms, related terms, references, and tags. It also displays the Owner and the Reviewers for the Glossary Term. 
{% image -src="/images/v1.5/how-to-guides/governance/term1.png" +src="/images/v1.6/how-to-guides/governance/term1.png" alt="Overview of a Glossary Term" caption="Overview of a Glossary Term" /%} @@ -57,17 +57,17 @@ caption="Overview of a Glossary Term" The **Glossary Term Tab** displays all the child terms associated with the parent term. You can also add more child terms from this tab. {% image -src="/images/v1.5/how-to-guides/governance/term2.png" +src="/images/v1.6/how-to-guides/governance/term2.png" alt="Glossary Terms Tab" caption="Glossary Terms Tab" /%} ### Assets Tab -The **Assets Tab** displays all the assets that are associated with the glossary term. These data assets are further subgrouped as Tables, Topics, Dashboards. The right side panel shows a preview of the data assets selected. +The **Assets Tab** displays all the assets that are associated with the glossary term. These data assets are further subgrouped on the basis of databases. The right side panel shows a preview of the data assets selected. {% image -src="/images/v1.5/how-to-guides/governance/term3.png" +src="/images/v1.6/how-to-guides/governance/term3.png" alt="Assets Tab" caption="Assets Tab" /%} @@ -77,7 +77,7 @@ You can add more assets by clicking on **Add > Assets**. You can further search {% note %} **Pro Tip:** The Global Search in OpenMetadata also helps discover related Glossary Terms and Tags. {% image -src="/images/v1.5/how-to-guides/governance/tag1.png" +src="/images/v1.6/how-to-guides/governance/tag1.png" alt="Search for Glossary Terms and Tags" caption="Search for Glossary Terms and Tags" /%} @@ -88,7 +88,7 @@ caption="Search for Glossary Terms and Tags" The glossary as well as the terms maintain a version history, which can be viewed on the top right. Clicking on the number will display the details of the **Version History**. 
{% image -src="/images/v1.5/how-to-guides/governance/version.png" +src="/images/v1.6/how-to-guides/governance/version.png" alt="Glossary Term Version History" caption="Glossary Term Version History" /%} diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-insights/report.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-insights/report.md index d76a171d9353..e6f68d0a6c61 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-insights/report.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-insights/report.md @@ -14,23 +14,33 @@ OpenMetadata offers a suite of reports providing platform analytics around speci - Cost Analysis (Collate only) {% image -src="/images/v1.5/how-to-guides/insights/insights1.png" +src="/images/v1.6/how-to-guides/insights/insights1.png" alt="Data Insights Report" caption="Data Insights Report" /%} All the reports can be filtered by **Teams, Data Tiers, and a Time Filter**. {% image -src="/images/v1.5/how-to-guides/insights/insights2.png" -alt="Data Insights Report Filters: Team, Tier, Time" -caption="Data Insights Report Filters: Team, Tier, Time" +src="/images/v1.6/how-to-guides/insights/insights2.png" +alt="Data Insights Report Filter: Team" +caption="Data Insights Report Filter: Team" +/%} +{% image +src="/images/v1.6/how-to-guides/insights/insights2.1.png" +alt="Data Insights Report Filter: Tier" +caption="Data Insights Report Filter: Tier" +/%} +{% image +src="/images/v1.6/how-to-guides/insights/insights2.2.png" +alt="Data Insights Report Filter: Time" +caption="Data Insights Report Filter: Time" /%} ## Data Assets Report The Data Asset reports display important metrics around your data assets in OpenMetadata. This report also displays the organizational health at a glance with details on the Total Data Assets, Data Assets with Description, Owners, and Tiers. 
{% image -src="/images/v1.5/how-to-guides/insights/ohg.png" +src="/images/v1.6/how-to-guides/insights/ohg.png" alt="Organization Health at a Glance" caption="Organization Health at a Glance" /%} @@ -40,7 +50,7 @@ caption="Organization Health at a Glance" This chart represents the total number of data assets present in OpenMetadata. It offers a view of your data assets broken down by asset type (i.e. DatabaseSchema, Database, Dashboard, Chart, Topic, ML Model, etc.) {% image -src="/images/v1.5/how-to-guides/insights/tda.png" +src="/images/v1.6/how-to-guides/insights/tda.png" alt="Total Data Assets" caption="Total Data Assets" /%} @@ -50,7 +60,7 @@ caption="Total Data Assets" It displays the percentage of data assets with description by data asset type. For Table asset type, this condition is true only if the table and column description are filed. It allows you to quickly view the description coverage for your data assets in OpenMetadata. {% image -src="/images/v1.5/how-to-guides/insights/pdad.png" +src="/images/v1.6/how-to-guides/insights/pdad.png" alt="Percentage of Data Assets with Description" caption="Percentage of Data Assets with Description" /%} @@ -60,7 +70,7 @@ caption="Percentage of Data Assets with Description" This chart represents the percentage of data assets present in OpenMetadata with an owner assigned. Data assets that do not support assigning an owner will not be counted in this percentage. It allows you to quickly view the ownership coverage for your data assets in OpenMetadata. {% image -src="/images/v1.5/how-to-guides/insights/pdao.png" +src="/images/v1.6/how-to-guides/insights/pdao.png" alt="Percentage of Data Assets with Owners" caption="Percentage of Data Assets with Owners" /%} @@ -70,7 +80,7 @@ caption="Percentage of Data Assets with Owners" This chart displays the percentage of data assets with description as ingested from each service. The search filter helps to narrow down the results and look for information by the required services. 
{% image -src="/images/v1.5/how-to-guides/insights/psd.png" +src="/images/v1.6/how-to-guides/insights/psd.png" alt="Percentage of Service with Description" caption="Percentage of Service with Description" /%} @@ -80,7 +90,7 @@ caption="Percentage of Service with Description" This chart displays the percentage of data assets with Owners as ingested from each service. The search filter helps to narrow down the results and look for information by the required services. {% image -src="/images/v1.5/how-to-guides/insights/pso.png" +src="/images/v1.6/how-to-guides/insights/pso.png" alt="Percentage of Service with Owners" caption="Percentage of Service with Owners" /%} @@ -90,7 +100,7 @@ caption="Percentage of Service with Owners" It displays a broken down view of data assets by Tiers. Data Assets with no tiers assigned are not included in this. It allows you to quickly view the breakdown of data assets by tier. {% image -src="/images/v1.5/how-to-guides/insights/tdat.png" +src="/images/v1.6/how-to-guides/insights/tdat.png" alt="Total Data Assets by Tier" caption="Total Data Assets by Tier" /%} @@ -100,7 +110,7 @@ caption="Total Data Assets by Tier" App analytics helps to track user engagement. This report provides important metrics around the usage of OpenMetadata. This report also displays the organizational health at a glance with details on the Page Views by Data Assets, Daily Active Users on the Platform, and the Most Active User. {% image -src="/images/v1.5/how-to-guides/insights/ohg2.png" +src="/images/v1.6/how-to-guides/insights/ohg2.png" alt="Organization Health at a Glance" caption="Organization Health at a Glance" /%} @@ -110,7 +120,7 @@ caption="Organization Health at a Glance" Know the 10 most viewed data assets in your platform. It offers a quick view to identify the data assets of the most interest in your organization. 
{% image -src="/images/v1.5/how-to-guides/insights/mvda.png" +src="/images/v1.6/how-to-guides/insights/mvda.png" alt="Most Viewed Data Assets" caption="Most Viewed Data Assets" /%} @@ -120,7 +130,7 @@ caption="Most Viewed Data Assets" It helps to understand the total number of page views by asset type. This allows you to understand which asset family drives the most interest in your organization {% image -src="/images/v1.5/how-to-guides/insights/pvda.png" +src="/images/v1.6/how-to-guides/insights/pvda.png" alt="Page Views by Data Assets" caption="Page Views by Data Assets" /%} @@ -130,7 +140,7 @@ caption="Page Views by Data Assets" Active users are users with at least one session. This report allows to understand the platform usage and see how your organization leverages OpenMetadata. {% image -src="/images/v1.5/how-to-guides/insights/daup.png" +src="/images/v1.6/how-to-guides/insights/daup.png" alt="Daily Active Users on the Platform" caption="Daily Active Users on the Platform" /%} @@ -140,7 +150,7 @@ caption="Daily Active Users on the Platform" This report displays the most active users on the platform based on Page Views. They are the power users in your data team. {% image -src="/images/v1.5/how-to-guides/insights/mau.png" +src="/images/v1.6/how-to-guides/insights/mau.png" alt="Most Active Users" caption="Most Active Users" /%} @@ -150,7 +160,7 @@ caption="Most Active Users" While data insights reports gives an analytical view of the OpenMetadata platform, KPIs are here to drive platform adoption. The below report displays the percentage coverage of description and ownership of the data assets. {% image -src="/images/v1.5/how-to-guides/insights/kpi.png" +src="/images/v1.6/how-to-guides/insights/kpi.png" alt="Key Performance Indicators (KPI)" caption="Key Performance Indicators (KPI)" /%} @@ -164,7 +174,7 @@ These reports are displayed for Collate SaaS users. 
A lot of money is invested i This report points out which data assets are getting a lot of use and which ones are not. {% image -src="/images/v1.5/how-to-guides/insights/uuac.png" +src="/images/v1.6/how-to-guides/insights/uuac.png" alt="Used vs Unused Assets Count" caption="Used vs Unused Assets Count" /%} @@ -178,7 +188,7 @@ This report displays how much data is being stored and how many terabytes of it It displays the size of the used vs unused assets over time in percentage. {% image -src="/images/v1.5/how-to-guides/insights/uuasp.png" +src="/images/v1.6/how-to-guides/insights/uuasp.png" alt="Used vs Unused Assets Size Percentage" caption="Used vs Unused Assets Size Percentage" /%} @@ -188,7 +198,7 @@ caption="Used vs Unused Assets Size Percentage" It displays the count of the used vs unused assets over time in percentage. {% image -src="/images/v1.5/how-to-guides/insights/uuacp.png" +src="/images/v1.6/how-to-guides/insights/uuacp.png" alt="Used vs Unused Assets Count Percentage" caption="Used vs Unused Assets Count Percentage" /%} @@ -198,7 +208,7 @@ caption="Used vs Unused Assets Count Percentage" A list of the unused assets is displayed. {% image -src="/images/v1.5/how-to-guides/insights/ua.png" +src="/images/v1.6/how-to-guides/insights/ua.png" alt="Unused Assets" caption="Unused Assets" /%} @@ -208,7 +218,7 @@ caption="Unused Assets" A list of the Frequently Used Assets is displayed. 
{% image -src="/images/v1.5/how-to-guides/insights/fau.png" +src="/images/v1.6/how-to-guides/insights/fau.png" alt="Frequently Used Assets" caption="Frequently Used Assets" /%} diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-lineage/workflow.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-lineage/workflow.md index 518ff39e782a..db9e4f1fc1fb 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-lineage/workflow.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/how-to-guides/data-lineage/workflow.md @@ -14,7 +14,7 @@ Once the metadata ingestion runs correctly, and we are able to explore the servi If the database has views, then the view lineage would be generated automatically, along with the column-level lineage. In such a case, the table type is **View** as shown in the example below. {% image - src="/images/v1.5/how-to-guides/lineage/view.png" + src="/images/v1.6/how-to-guides/lineage/view.png" alt="View Lineage through Metadata Ingestion" caption="View Lineage through Metadata Ingestion" /%} @@ -24,16 +24,28 @@ Apart from the Metadata ingestion, we can create a workflow that will obtain the ### 1. Add a Lineage Ingestion -Navigate to **Settings >> Services**. Select the required service +Navigate to **Settings >> Services >> Databases**. Select the required service {% image - src="/images/v1.5/how-to-guides/lineage/wkf1.png" + src="/images/v1.6/how-to-guides/lineage/wkf1.png" alt="Select a Service" caption="Select a Service" /%} + {% image + src="/images/v1.6/how-to-guides/lineage/wkf1.1.png" + alt="Click on Databases" + caption="Click on Databases" + /%} + + {% image + src="/images/v1.6/how-to-guides/lineage/wkf1.2.png" + alt="Select the Database" + caption="Select the Database" + /%} + Go the the **Ingestions** tab. Click on **Add Ingestion** and select **Add Lineage Ingestion**. 
{% image - src="/images/v1.5/how-to-guides/lineage/wkf2.png" + src="/images/v1.6/how-to-guides/lineage/wkf2.png" alt="Add a Lineage Ingestion" caption="Add a Lineage Ingestion" /%} @@ -42,7 +54,7 @@ Go the the **Ingestions** tab. Click on **Add Ingestion** and select **Add Linea Here you can enter the Lineage Ingestion details: {% image - src="/images/v1.5/how-to-guides/lineage/wkf3.png" + src="/images/v1.6/how-to-guides/lineage/wkf3.png" alt="Configure the Lineage Ingestion" caption="Configure the Lineage Ingestion" /%} @@ -61,7 +73,7 @@ Here you can enter the Lineage Ingestion details: After clicking Next, you will be redirected to the Scheduling form. This will be the same as the Metadata Ingestion. Select your desired schedule and click on Deploy to find the lineage pipeline being added to the Service Ingestions. {% image - src="/images/v1.5/how-to-guides/lineage/wkf4.png" + src="/images/v1.6/how-to-guides/lineage/wkf4.png" alt="Schedule and Deploy the Lineage Ingestion" caption="Schedule and Deploy the Lineage Ingestion" /%} @@ -90,4 +102,4 @@ Lineage can also be added and edited manually in OpenMetadata. Refer for more in icon="MdArrowForward" href="/how-to-guides/data-lineage/explore"%} Explore the rich lineage view in OpenMetadata. 
-{%/inlineCallout%} \ No newline at end of file +{%/inlineCallout%} diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/menu.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/menu.md index c989ab2abaae..e5f3ec5e343f 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/menu.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/menu.md @@ -372,6 +372,12 @@ site_menu: url: /connectors/database/sap-hana - category: Connectors / Database / SAP Hana / Run Externally url: /connectors/database/sap-hana/yaml + - category: Connectors / Database / SAP ERP + url: /connectors/database/sap-erp + - category: Connectors / Database / SAP ERP / Run Externally + url: /connectors/database/sap-erp/yaml + - category: Connectors / Database / SAP ERP / Setup SAP ERP APIs + url: /connectors/database/sap-erp/setup-sap-apis - category: Connectors / Database / SAS url: /connectors/database/sas - category: Connectors / Database / SAS / Run Externally diff --git a/openmetadata-docs/content/v1.6.x-SNAPSHOT/releases/releases/index.md b/openmetadata-docs/content/v1.6.x-SNAPSHOT/releases/releases/index.md index f505c15032cd..fe825c9835d3 100644 --- a/openmetadata-docs/content/v1.6.x-SNAPSHOT/releases/releases/index.md +++ b/openmetadata-docs/content/v1.6.x-SNAPSHOT/releases/releases/index.md @@ -18,8 +18,53 @@ version. To see what's coming in next releases, please check our [Roadmap](/rele ## Breaking Changes -- The ingestion Framework now uses the OpenMetadata Ingestion Service Specificaiton (OMISS) to specify -enrtypoints to ingestion operations. [Click here](./todo-need-link) for more info. +- The ingestion Framework now uses the OpenMetadata Ingestion Service Specification (OMISS) to specify + entrypoints to ingestion operations. [Click here](./todo-need-link) for more info. + +# 1.5.9 Release + +{% note noteType="Tip" %} +**Oct 29th, 2024** +{% /note %} + +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.9-release). 
+ +# What's Changed + +- Prepare App Framework to handle application limits +- Add Query Builder widget +- Revamp MetaPilot as Collate AI and add limits (Collate only) +- Fix EntityLink for names with brackets +- Fix backend database Azure auth +- Mask Greenplum secrets on the UI + +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.8-release...1.5.9-release + +# 1.5.8 Release + +{% note noteType="Tip" %} +**Oct 23rd, 2024** +{% /note %} + +You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.5.8-release). + +# What's Changed + +- Minor: Add location path to a table entity. +- Minor: Do not include soft deleted assets in the Data Insight. +- Minor: Supported total unique user count on the Team page. +- Fix: Add Azure Token Base Authentication +- Fix: Hive Meta store connection issue. +- Fix: Issues in zh language search index mapping. +- Fix: Live index is on test suite creation. +- Fix: LocationPath Index. +- Fix: Mode dashboard ingestion API call. +- Fix: Mode test connection returns data in dict instead of JSON. +- Fix: Quicksight lineage source. +- Fix: Task deserialization in Airflow metadata ingestion. +- Fix: Web analytic activity being reset. + +**Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.7-release...1.5.8-release # 1.5.7 Release @@ -101,8 +146,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improved AlationSink connector. - Fixed sktime version to fix AUT - Fixed Expected ColumnLineage but got dict -- Improved Collate API with Knowledge Center routes. ${CollateIconWithLinkMD} - +- Improved Collate API with Knowledge Center routes. 
${CollateIconWithLinkMD} **Full Changelog**: https://github.com/open-metadata/OpenMetadata/compare/1.5.5-release...1.5.6-release @@ -154,6 +198,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # What's Changed ## OpenMetadata + - Hotfix to the Term Aggregation size on Data Insights - ES pagination with error handling - Updated Domain in Docker Compose & Docs @@ -165,6 +210,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Unable to access the import glossary page ## Collate + - Fix token limitations using config - Fix Automator pagination - Fix MetaPilot push for no constraint @@ -234,17 +280,20 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # Backward Incompatible Changes ## Multi Owners + OpenMetadata allows a single user or a team to be tagged as owners for any data assets. In Release 1.5.1, we allow users to tag multiple individual owners or a single team. This will allow organizations to add ownership to multiple individuals without necessarily needing to create a team around them like previously. This is a backward incompatible change, if you are using APIs, please make sure the owner field is now changed to “owners” ## Import/Export Format + To support the multi-owner format, we have now changed how we export and import the CSV file in glossary, services, database, schema, table, etc. The new format will be user:userName;team:TeamName If you are importing an older file, please make sure to make this change. ## Pydantic V2 + The core of OpenMetadata are the JSON Schemas that define the metadata standard. These schemas are automatically translated into Java, Typescript, and Python code with Pydantic classes. In this release, we have [migrated](https://docs.pydantic.dev/latest/migration/) the codebase from Pydantic V1 to Pydantic V2. 
@@ -294,14 +343,17 @@ In the latest release, several updates and enhancements have been made to the JS - **Tableau**: Enhanced support by adding capabilities for connecting with `TableauPublishedDatasource` and `TableauEmbeddedDatasource`, providing more granular control over data visualization and reporting. ## Include DDL + During the Database Metadata ingestion, we can optionally pick up the DDL for both tables and views. During the metadata ingestion, we use the view DDLs to generate the View Lineage. To reduce the processing time for out-of-the-box workflows, we are disabling the include DDL by default, whereas before, it was enabled, which potentially led to long-running workflows. ## Secrets Manager + Starting with the release 1.5.1, the JWT Token for the bots will be sent to the Secrets Manager if you configured one. It won't appear anymore in your dag_generated_configs in Airflow. ## Python SDK + The `metadata insight` command has been removed. Since Data Insights application was moved to be an internal system application instead of relying on external pipelines the SDK command to run the pipeline was removed. # What's New @@ -326,11 +378,13 @@ We also have improved the Table Data quality dashboard to showcase the tests cat {% youtube videoId="bXcQBtZuyoU" start="0:00" end="2:10" width="560px" height="315px" /%} ## Freshness Data Quality Tests (Collate) + Working with old data can lead to making wrong decisions. With the new Freshness test, you can validate that your data arrives at the right time. Freshness tests are a critical part of any data team's toolset. Bringing these tests together with lineage information and the Incident Manager, your team will be able to quickly detect issues related to missing data or stuck pipelines. {% youtube videoId="QRcR3m9cCGo" start="0:00" end="1:09" width="560px" height="315px" /%} ## Data Diff Data Quality Tests + Data quality checks are important not only within a single table but also between different tables. 
These data diff checks can ensure key data remains unchanged after transformation, or conversely, ensure that the transformations were actually performed. We are introducing the **table difference data quality test** to validate that multiple appearances of the same information remain consistent. Note that the test allows you to specify which column to use as a key and which columns you want to compare, and even add filters in the data to give you more control over multiple use cases. @@ -338,11 +392,13 @@ We are introducing the **table difference data quality test** to validate that m {% youtube videoId="oxZVS_UGrE4" start="0:00" end="2:22" width="560px" height="315px" /%} ## Domains RBAC & Subdomains + OpenMetadata introduced Domains & Data Products in 1.3.0. Since then, many large organizations have started using Domains & Data Products to achieve better ownership and collaboration around domains that can span multiple teams. In the 1.5.1 release, we added support for subdomains. This will help teams to organize into multiple subdomains within each domain. ### RBAC for Domains + With the 1.5.1 release, we are adding more stricter controls around Domain. Now, teams, data assets, glossaries, and classification can have domain concepts and can get a policy such that only users within a domain can access the data within a domain. Domain owners can use Data Products to publish data products and showcase publicly available data assets from a specific domain. 
This will help large companies to use a single OpenMetadata platform to unify all of their data and teams but also provide more stringent controls to segment the data between domains @@ -350,6 +406,7 @@ This will help large companies to use a single OpenMetadata platform to unify al {% youtube videoId="r-_HaewjgTQ" start="0:00" end="0:44" width="560px" height="315px" /%} ## Improved Explore Page & Data Asset Widget + OpenMetadata, with its simple UI/UX and data collaboration features, is becoming more attractive to non-technical users as well. Data Governance teams are using OpenMetadata to add glossary terms and policies around metadata. Teams using Collate SaaS product are taking advantage of our Automations feature to gain productivity in their governance tasks. Our new improved navigation on the Explore page will help users navigate hierarchically and find the data they are looking for. Users will see the data assets now grouped by `service name -> database -> schema -> tables/stored procedures`. @@ -359,11 +416,13 @@ We are also making the discovery of data more accessible for users introducing a {% youtube videoId="45ekUIRO1Ec" start="0:00" end="1:11" width="560px" height="315px" /%} ## Pipeline Status Widget + We are also adding another widget you can use to customize the Landing Page of the User Personas in your organization. With the Pipeline Status widget, Data Engineers can easily track the pipelines that are not behaving as expected. This widget, together with the obervability alerts that are already in place, will help your teams jump even faster to solving any issues in the platform. ## API as Data Asset + The Internet runs using APIs, both producing and consuming data. Organizations today run many microservices and REST APIs to capture data from their users and update a transaction database in the backend. On top of the many supported connectors across Databases, Dashboards, ML Models, etc. 
We believe that providing support for API Services as data assets will help to get the full picture of how the data is coming through from various services and landing into databases, going to warehouses and BI tools. @@ -373,20 +432,25 @@ In 1.5.1 we are introducing APIs as another first-class entity. Teams can now ca {% youtube videoId="b9wrVnM3u80" start="0:00" end="0:33" width="560px" height="315px" /%} ## Glossary Improvements + OpenMetadata supports multiple glossaries, an import/export and review process, and bulk asset tagging with glossary terms. Many teams are taking advantage of these features, and with an amazing open-source community, we are receiving great feedback on improving glossary functionality. Here are some of the improvements coming in 1.5.1: + 1. Glossary Reviewers can be teams 2. Updating a glossary will enforce a re-review 3. Renaming the Glossary Term while it's under review will keep the task associated with it open ## Data Insights (Collate) + The Data Insights application is meant to give you a quick glance of your data's state and allow you to take action based on the information you receive. To continue pursuing this objective, the application was completely refactored to allow customizability. This is achieved by the possibility of now creating custom dashboards. On this release you can create charts based on your data assets metadata based on your needs. ## Ingestion Connectors + 80+ connectors to help teams to centralize metadata. We continue to push the boundaries of this mission, in + - **Apache Flink** as a Pipeline Connector - **SAP ERP**, after a long and successful collaboration with our community and SAP experts - **Teradata** as a community contribution from [gbpy](https://github.com/gpby) to broaden the integration capabilities for enterprise-scale analytics and data management. 
@@ -400,7 +464,7 @@ To continue pursuing this objective, the application was completely refactored t **August 6th, 2024** {% /note %} -- Make `Include ddl` disabled by default +- Make `Include ddl` disabled by default - Made DDL configuration consistent with views - Fix user profile task listing. - Fix import/export UI flow ${CollateIconWithLinkMD}. @@ -550,11 +614,13 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta ## Backward Incompatible Changes ### Tooling + - Metadata Backup/Recovery is deprecated. No further support will be provided. - Users are advised to use database native tools to backup and store it in their object store for recovery. - `bootstrap/bootstrap_storage.sh` has been deprecated in favor of bootstrap/openmetadata-ops.sh ### UI + - Activity has been improved. New update specific cards display critical information such as data quality test case updates, description, tag update or removal. - For Lineage, the Expand All button has been removed. A new Layers button is introduced at the bottom left corner. With the Layers button, you can add Column Level Lineage or Data Observability details to your Lineage view. - View Definition is now renamed as Schema Definition. @@ -562,6 +628,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - For Classification, users can set classification to be mutually exclusive only at the time of creation. Once created, you cannot change it back to mutually non-exclusive or vice-versa. This is to prevent conflicts of adding multiple tags that belong to same classification and later turning the mutually exclusive flag back to true. ### API + - Table Schema's `ViewDefinition` is now renamed to `SchemaDefinition` to capture Tables' Create Schema. - Bulk Import API now creates entities if they are not present during the import. - Table's TestSuite is migrated to EntityReference. Previously it used to store entire payload of TestSuite. 
@@ -585,6 +652,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - APIs are available in OSS. ## Data Quality Improvements + {% youtube videoId="UNOHvBMVcYM" start="0:00" end="1:28" width="560px" height="315px" /%} - The Table schema page now shows the Data Quality tests for each column. @@ -595,17 +663,20 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support has been added for an empty string as a missing count. ## Data Profiler + - Implemented a global profiler configuration page, allowing admin to exclude certain metric computations for specific data types. - Added profiler support for Redshift complex types and DynamoDB. - Fixed an issue with performing sum operations for large values in profiler ingestion. - Fixed the histogram unit's issues with scientific notation. ## Incident Manager + - We now display a sample of failed rows for the latest failed test cases. Once the issue is resolved, the failed sample will be deleted. (Collate Only) - Fixed the Date time filter for the Incident Manager. - Notifications are sent for the tasks created by the Incident Manager. ## Lineage Improvements + https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Column Lineage Search @@ -630,10 +701,12 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Support has been added for dynamic tables. ## Data Insights + - Previously, the data insights reports displayed only the percentage coverage of ownership and description. Now, users can drill down to view the data assets with no owner or description. - Improved the UX for data insight filters. ## Cost Analysis (Collate Only) + - Lifecycle data for Cost Analysis has been implemented for BigQuery, Snowflake, and Redshift. 
## Custom Theme @@ -651,10 +724,12 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Added a Data Quality Widget to list the summary of data quality tests belonging to a user or their team. ## Ingestion Performance Improvements + - Bigquery, Redshift, and Snowflake now support incremental metadata ingestions by scanning DML operations on the query history. - Database Services now support parallelizing the metadata ingestion at each schema. ## Connectors + - Now supports a new connector for QlikCloud. - New Kafka Connect connector - We now parse complex protobuf schemas for Kafka @@ -663,7 +738,7 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Added an option to include or exclude paused pipelines in Airflow. - Revamped SSL support to allow users to upload the required certificates directly in the UI. - The character support has been enhanced for tag ingestion to include /. -- In the Oracle connector, we rolled back to use all_ tables instead of dba_. +- In the Oracle connector, we rolled back to use `all_` tables instead of `dba_`. - Added support for Azure auth in Trino. - For QlikSense, we have added an option to disable SSL validation. @@ -674,25 +749,31 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Custom Properties now allow linking other assets in the platform, such as Tables, Dashboards, etc. To enable this, create a Custom Property as an Entity Reference or Entity Reference List. ## Glossary + - The glossary term parent can now be changed from the Details page. - On the data assets page, glossary terms are displayed by hierarchy. ## Alerts & Notification Improvements + - The Activity Feed provides more contextual information, removing the need to move to entity pages. - Alerts give more accurate information about the entity, as well as conversations and tasks. ## Localization + - Fixed localization issues in the confirmation logic for the delete function.
- Fixed the search index language configuration. ## Roles + - Now, roles can be inherited from the user configuration in SSO. ## Search + - You can now filter by assets without a description or an owner. - Improved the match results for search results. ## Others + - The description is auto-expanded when the data asset has no data and has the space to accommodate a lengthy description. - User email IDs have been masked and are only visible to Admins. - Users can filter Queries by owner, tag, and creation date in the UI. @@ -703,7 +784,6 @@ https://www.youtube.com/watch?v=KZdVb8DiHJs - Video on Column Lineage Search - Redundant scroll bars have been removed from the UI. - Improved the bot role binding to provide more control over which roles are passed to the system bots. - # 1.3.4 Release 🎉 {% note noteType="Tip" %} @@ -742,21 +822,25 @@ Learn how to upgrade your OpenMetadata instance to 1.3.3! You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.3.2-release). ## MetaPilot (Collate) + - New MetaPilot application shipped in preview mode. Try it out in the [Sandbox](https://sandbox.open-metadata.org/)! - Get automatically generated descriptions with GenAI. Now it’s easier than ever to document your data assets. - Chat with the MetaPilot and get SQL queries to help you extract relevant information from your data assets. - Let the MetaPilot help you understand and improve the queries used on your tables. ## Authentication Flow + - Added generic support for OIDC Authentication. This is SSO provider-agnostic. - You can now integrate Confidential Clients to manage the server authentication. - Now, the session renewal happens automatically in the backend. ## Data Quality + - Pagination support was added for the Data Quality tab for data assets. - Fixed an issue with execution summary timeout issue for the data quality test. ## Connectors + - New Bigtable connector. 
- Now, users can configure the external sample data storage path. - Added lineage support for Snowflake materialized view and masking policies. @@ -767,6 +851,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improve PATCH generation for array fields. ## Other Changes + - Avoid creating duplicated queries. - Speed up the server start time by moving the Secrets Manager Migration to the migration container. - Fixed the issue with the date filter for the Incident Manager. @@ -775,6 +860,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed an issue with search indexing. - Fixed the missing input field for conversation source for alerts and notifications. - Filter dashboards by a project on the Explore page. + --- **Full Changelog**: [link](https://github.com/open-metadata/OpenMetadata/compare/1.3.1-release...1.3.2-release) @@ -787,30 +873,36 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta You can find the GitHub release [here](https://github.com/open-metadata/OpenMetadata/releases/tag/1.3.1-release). {% /note %} - ## Knowledge Center (Collate) + - Supports drag and drop for the hierarchy of knowledge articles. - Enhanced the layout and loading experience of the knowledge page. ## Lineage + - When adding a new node in Lineage, the Display Name is supported in search. - Fixed the issues with displaying lineage from Metabase. ## Glossary + - Improved the automation of performance tests for Glossary. - Performance improvements to display a large Glossary. ## Data Insights + - Data Insights report has been improved. - The cost Analysis report has been optimized. ## Notifications + - The format for Slack notifications has been improved. ## Custom Properties + - Added enum type support for custom properties. ## Connectors + - Now BigQuery connector supports Primary, Foreign, and Unique Constraints. It fetches the column description for views. 
- Captures the SQL query that powers a Tableau DataModel. - Azure Key Vault is supported as a Secrets Manager. @@ -820,6 +912,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed an issue with the service display name after ingestion. ## Other Changes + - The functionality for mutually exclusive tags has been disabled. - PodGC set up for Argo workflows to delete the pods from the Kubernetes environment on a successful run of the pods. - Fixed the issue with the display of the personal access token. @@ -830,11 +923,9 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed the issues with testing the email settings. - Fixed an issue with adding tags. - - # 1.3.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2024, February 5th** [OpenMetadata 1.3 Release - Intuitive Lineage UI, Data Observability Alerts, Data Quality Incident Manager, Custom Metrics for Profiler, Knowledge Center Improvements, and lots more](https://blog.open-metadata.org/openmetadata-release-1-3-ac801834ee80) @@ -900,12 +991,14 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Custom metrics can be created at Table and Column levels. ## Profiler and Data Quality + - The Profiler has been improved to support sample data ingestion without computing other metrics. - Admins can configure the profiler to fetch up to 10,000 rows of sample data. - Sample data can be stored in S3 buckets. - Refined the default time range on the test case results page, adjusting it from the Last 3 days to the Last 30 days for a more encompassing view. ## Connectors + - New Google Cloud Storage for storage services. (Collate) - New Alation connector to migrate metadata into Collate. (Collate) - New Iceberg, SAS Viya, and Doris connectors. 
@@ -920,6 +1013,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - We now extract table descriptions when ingesting metadata from Salesforce. ## Glossary + - Supports soft delete for the default glossaries in OpenMetadata. - Supports the creation of tasks to request tags or a description. - Only the Owner can edit the Glossary term. @@ -939,10 +1033,12 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - The Settings page UI has been revamped. ## Data Insights + - Cost Analysis expanded to support BigQuery & Redshift. (Collate) - Improved the Data Insights Report sent via email. ## Other Changes + - Announcements can be notified over email, Slack, or Teams. - Alerts are sent to a user when they are mentioned in a task or activity feed. - We have improved the display of search results for column matches. When searching for columns, the matched results will be displayed and highlighted in the Preview pane. @@ -958,7 +1054,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.2.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, October 26th** [OpenMetadata 1.2 Release - Domains, Data Products, Search Index, Stored Procedures, Glossary Approval Workflow, Customizable Landing Page, Applications, Knowledge Center, Cost Analysis, and lots more](https://blog.open-metadata.org/openmetadata-release-1-2-531f0e3c6d9a) @@ -977,10 +1073,12 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Assets can also be added as Data Products in a Domain. ## Search Index + - Elasticsearch or Opensearch connectors can now bring in the search index metadata into OpenMetadata. - The connector will populate the index’s mapping, settings, and sample data. ## Stored Procedures + - Added support for Stored Procedures. - Snowflake, Redshift, and BigQuery connectors are updated to bring stored procedure metadata into OpenMetadata. 
- The metadata workflow will bring the Stored Procedures and parse their executions to extract lineage information. @@ -998,8 +1096,9 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Color coding helps to visually differentiate and identify the data assets, when glossary terms are added to them. ## OpenMetadata Browser Extension + - Updated the Chrome browser extension for OpenMetadata with the new UI. -- Added support for Databases, Database Schemas, Tables, Dashboards, Charts, Pipelines, and Topics. +- Added support for Databases, Database Schemas, Tables, Dashboards, Charts, Pipelines, and Topics. ## Build Automation Applications @@ -1011,11 +1110,13 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - We will continue to add new Applications in upcoming releases. ## Lineage + - Performance improvements made for lineage based on the new release of SQLfluff. - Added support for `UPDATE … FROM Snowflake` queries - Added column-level lineage support for `SELECT *` queries ## Connectors + - Greenplum connector is now supported. - Couchbase connector is now supported. - Azure Data Lake Storage is now supported. (Collate) @@ -1046,21 +1147,24 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.1.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, August 24th** {% /note %} ## Data Quality + - Added support for Postgres version 11.19. - Fixed MariaDB time column issues. ## Connectors + - Added JWT authentication support for Trino. - Fixed Snowflake connection test. - Fixed SageMaker ingestion. - Added external table support for BigQuery. ## UI Improvements + - Added Russian language support. - Supports Delete functionality for sample data. - Improved Schema page UX. @@ -1068,9 +1172,11 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Fixed the version history list. 
## Ingestion + - Improved performance when ingesting table constraints. ## Backend + - Improved Glossary import validations. - Fixed Test Suite migrations and naming. - Fixed Classification migration. @@ -1079,7 +1185,7 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 1.1.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, August 7th** {% /note %} @@ -1092,15 +1198,18 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support custom cron for schedule ingestion ## Data Quality + - Fix BigQuery, MSSQL, and Clickhouse profiling errors ## Ingestion + - Fixed Airflow lineage extraction. - Added support for Databricks complex columns comments. - Fixed Athena lineage and usage parameter validation. - Airflow Managed APIs now support Airflow 2.6 ## Connectors + - New [Qliksense](qlik.com) Connector. - Hive supports extracting metadata directly from the metastore to speed up the execution. Users whose metastore is not exposed can still run the extraction pointing to Hive. - Added Usage & Lineage connector for Trino. @@ -1109,14 +1218,14 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Added support for JSON fields in SingleStore. ## Backend + - Bumped table and column names length - Aggregation Improvements for Search - Test Suite Improvements - # 1.1.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, June 30th** [OpenMetadata 1.1.0 Release - UI Overhaul, New Connectors, Improved Lineage Parsing, PII Masking, and lots more](https://blog.open-metadata.org/openmetadata-1-1-0-release-97c1fb603bcf) @@ -1165,84 +1274,97 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Improved monitoring of the Pipeline Service Client health. 
Any status errors between the OpenMetadata server and the Pipeline Service Client are now surfaced in a Prometheus metric `pipelineServiceClientStatus_counter_total` - Added AWS OpenSearch client-specific support. This allows us to update the Elasticsearch version support up to 7.16. - # 1.0.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, April 25th** [OpenMetadata 1.0 Release - Improved Schemas & APIs, Ingestion Improvements, Storage Services, Dashboard Data Models, Auto PII Classification, Localization, and much more](https://blog.open-metadata.org/openmetadata-1-0-release-beb34762d916) {% /note %} ## APIs & Schema + - **Stabilized** and improved the Schemas and APIs. - The APIs are **backward compatible**. ## Ingestion + - Connecting to your data sources has never been easier. Find all the necessary **permissions** and **connection details** directly in the UI. - When testing the connection, we now have a comprehensive list of **validations** to let you know which pieces of metadata can be extracted with the provided configuration. - **Performance** improvements when extracting metadata from sources such as Snowflake, Redshift, Postgres, and dbt. - New **Apache Impala** connector. ## Storage Services + - Based on your [feedback](https://github.com/open-metadata/OpenMetadata/discussions/8124), we created a new service to extract metadata from your **cloud storage**. - The Data Lake connector ingested one table per file, which covered only some of the use cases in a Data Platform. With **Storage Services**, you can now present accurate metadata from your tables, even when **partitioned**. - The first implementation has been done on **S3**, and we will keep adding support for other sources in the upcoming releases. ## Dashboard Data Models + - Dashboard Services now support the concept of **Data Models**: data that can be directly defined and managed in the Dashboard tooling itself, e.g., LookML models in Looker. 
- Data Models will help us close the gap between engineering and business by providing all the necessary metadata from sources typically used and managed by analysts or business users. - The first implementation has been done for **Tableau** and **Looker**. ## Queries + - Improved UI for **SQL Queries**, with faster loading times and allowing users to **vote** for popular queries! - Users can now create and share a **Query** directly from the UI, linking it to multiple tables if needed. ## Localization + - In 1.0, we have added **Localization** support for OpenMetadata. - Now you can use OpenMetadata in **English**, **French**, **Chinese**, **Japanese**, **Portuguese**, and **Spanish**. ## Glossary + - New and Improved **Glossary UI** - Easily search for Glossaries and any Glossary Term directly in the **global search**. - Instead of searching and tagging their assets individually, users can add Glossary Terms to multiple **assets** from the Glossary UI. ## Auto PII Classification + - Implemented an automated way to **tag PII data**. - The auto-classification is an optional step of the **Profiler** workflow. We will analyze the column names, and if sample data is being ingested, we will run NLP models on top of it. ## Search + - **Improved Relevancy**, with added support for partial matches. - **Improved Ranking**, with most used or higher Tier assets at the top of the search. - Support for **Classifications** and **Glossaries** in the global search. ## Security + - **SAML** support has been added. - Added option to mask passwords in the API response except for the `ingestion-bot` by setting the environment variable `MASK_PASSWORDS_API=true`. More info [here](/deployment/security/enable-password-masking). - **Deprecation Notice**: **SSO** Service accounts for Bots will be deprecated. **JWT** authentication will be the preferred method for creating Bots. ## Lineage + - Enhanced Lineage UI to display a large number of **nodes (1000+)**. 
- Improved UI for **better navigation**. - Improved **SQL parser** to extract lineage in the Lineage Workflows. ## Chrome Browser Extension + - All the metadata is at your fingertips while browsing Looker, Superset, etc., with the OpenMetadata Chrome Browser Extension. - **Chrome extension** supports Google SSO, Azure SSO, Okta, and AWS Cognito authentication. - You can Install the Chrome extension from **Chrome Web Store**. ## Other Changes + - The **Explore page** cards will now display a maximum of **ten tags**. - **Entity names** support apostrophes. - The **Summary panel** has been improved to be consistent across the UI. # 0.13.3 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, March 30th** {% /note %} ## Ingestion Framework + - Datalake Avro & Json, JsonZip support - BigQuery Profiler Ingestion for all regions - Support for Snowflake Geometry Type @@ -1261,57 +1383,69 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta - Support for DBT manifest V8 ## Roles & Policies + - A Non-Privileged user can add new 'Roles' to Teams - Fix Permissions API to consider the leaf nodes tags as well, example: table's column tags ## Search + - Improve Search Relevancy, by adding functional scoring and add ngram analyzer; - Enable search for entities using both name and displayName ## Security + - Enable LDAP configuration to be configured via environment variable - LDAP-s support connection without MTLS ## EntityName + - Relax data asset name restrictions to allow the special characters except "::" - Allow unicode character and digits in Entity ## Data Quality + - Fix column values between test # 0.13.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2023, January 30th** [OpenMetadata 0.13.2 Release - Improved SQL Lineage, Glossary Bulk Upload, Unified Tag Category API, Mutually Exclusive Tags, Chrome Extension, and lots more](https://blog.open-metadata.org/openmetadata-0-13-2-release-e32c0de93361) {% /note 
%} ## Improved SQL Lineage + - We have collaborated with the [sqllineage](https://github.com/reata/sqllineage) and [sqlfluff](https://www.sqlfluff.com/) communities - to improve the parsing capabilities of `sqllineage`. We'll continue to collaborate to ship further improvements in new releases. + to improve the parsing capabilities of `sqllineage`. We'll continue to collaborate to ship further improvements in new releases. ## New Glossary UI + - Moved from a tree view in the left panel to an easy to navigate list of the terms sorted alphabetically. - The term list shows the tags and descriptions in the cards. ## Glossary Import & Export + - You can now export your Glossary data as a CSV file. - In the same way, you can now bulk upload terms to a Glossary by adding their details in a CSV file. - The import utility will validate the file and show you a preview of the elements that are going to be imported to OpenMetadata. ## Unified Tag Category API + - Renamed Tag Categories to Classification, a more widely used term. - Updated the API to conform with the rest of the specification. More info [here](https://github.com/open-metadata/OpenMetadata/issues/9259). ## Mutually Exclusive Tags + - When creating a Classification or a Glossary term, you can now make the tags to be mutually exclusive. - If tags are set to be mutually exclusive, you won't be able to set multiple tags from the same category in the same asset. ## EntityName + - Special characters ## Ingestion Framework + - Performance Improvements: We are now getting descriptions in batch, making connectors such as Redshift or Snowflake way faster! - The Oracle connector now ships with the Thick mode enabled. - AWS QuickSight fixes @@ -1321,36 +1455,41 @@ You can find the GitHub release [here](https://github.com/open-metadata/OpenMeta # 0.13.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, December 20th** {% /note %} ## Profiler and Data Quality + - Freshness Metric has been introduced. 
Data freshness shows DML operations performed against a table and the number of rows affected. All this is displayed within the data profiler with filterable graphs. This is currently supported for BigQuery, Snowflake, and Redshift. - Support has been added for data quality tests on Data Lake. - UI has been improved to show table and column profile data on separate page. Legend is now selectable to filter for specific metrics ## Alerts and Notification + The logic for Notification Support has been improved. Users can define Alerts based on a Trigger (all data assets or a specific entity), Filters (events to consider), and Action (Slack, MS Teams, Email, Webhook) on where to send the alert. ## Ingestion -- Now, dbt has its own workflow. Previously, dbt was a part of metadata ingestion workflow. + +- Now, dbt has its own workflow. Previously, dbt was a part of metadata ingestion workflow. - Airflow Lineage Operator and the OpenMetadata Hook are now part of the ingestion package. Send Airflow metadata from your DAGs and safely store the OpenMetadata server connection directly in Airflow. - Multiple Databases (catalog) is now supported for the Databricks connector - Azure blob is now supported to backup your metadata into ## New Connectors + - OpenMetadata now supports Azure Datalake Storage Gen 2 ## General Improvements + - Users can update the description and tags for Topic Schema. Previously, the topic schemas were read-only. We now support Avro/Protobuf parsing and field level details for topic schemas. -- The layout for the Data Insight Report has been improved. We now display a line graph instead of a bar graph. The Most Viewed Data Assets are clickable to view the asset details page. +- The layout for the Data Insight Report has been improved. We now display a line graph instead of a bar graph. The Most Viewed Data Assets are clickable to view the asset details page. - Improvements have been made to Advanced Search. 
Now, when a filter is applied, the details of the filter selected are displayed for clarity. - On the Explore page UI, the Side Preview is now available for all data assets. Previously it was only displayed for tables. # 0.13.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, December 8th** [OpenMetadata 0.13.0 Release — Data Insights & KPIs, Lineage Traceability, Data Lake Profiler, Search Improvements, and lots more](https://blog.open-metadata.org/openmetadata-0-13-0-release-ac8ac5bd87c1) @@ -1359,58 +1498,70 @@ The logic for Notification Support has been improved. Users can define Alerts ba {% youtube videoId="oNbMnTW5AkE" start="0:00" end="7:51" width="560px" height="315px" /%} ## Data Insights and KPI + Data Insight allows admins to take an active approach in their metadata management. Data Insight provides a single-pane view of all the key metrics to best reflect the state of your data. Admins can define the Key Performance Indicators (KPIs) and set goals within OpenMetadata to work towards better documentation, ownership, and tiering. Alerts can be set against the KPIs to be received on a specified schedule. ## Lineage + The lineage UI has been transformed to enhance user experience. Users can get a holistic view of an entity from the Lineage tab. When an entity is selected, the UI displays end-to-end lineage traceability for the table and column levels. ## Profiler + With the OpenMetadata UI, users can now create and deploy profiling workflows for the Datalake connector, which supports AWS S3 and GCS ## SSO + Support for LDAP SSO has been added in this release ## Advance Search + Syntax Editor has been introduced for advanced search with And/Or conditions that help discover assets quickly ## New Connectors + - AWS SageMaker - AWS QuickSight - AWS Kinesis - Domo ## Messaging Service Schemas Improvements + Major enhancements have been made to how data is extracted from Kafka and Redpanda Messaging services. 
Previously, OpenMetadata extracted all the Topics in the messaging queue and also connected to the Schema Registry to get the Schemas. These schemas were taken as one payload and published to OpenMetadata. We now parse Avro and Protobuf Schemas to extract the fields. Now, users can document each of these fields within a schema by adding descriptions and tags. Users can search based on the fields in the Schema of a Topic. ## General Improvements + - Soft deleted entities can be restored. Currently, only the ML Models are not supported. - Soft deleted teams can be restored. When restoring a soft deleted parent team, the child teams will not be restored by default. # 0.12.3 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, November 18th** {% /note %} ## Bug Fixes + - User suggestion index mapping - Tag and Glossary terms caching # 0.12.2 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, October 20th** {% /note %} ## Ingestion + - Databricks lineage - Added support for Airflow version 2.2.2 as a workflow scheduler + ## Bug Fixes + - Support same table across different databases for the profiler # 0.12.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, October 3rd** {% /note %} @@ -1418,7 +1569,7 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan - User/Password signup and login - Email notifications for forgotten password and new user signed up -- Admin can add new users and send an email +- Admin can add new users and send an email ## ElasticSearch full re-index through UI @@ -1432,17 +1583,17 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan - We support ingesting DBT tags into OpenMetadata -## Bots Integration +## Bots Integration - Admins can create bots and their security mechanism from UI itself ## Bug Fixes -- Around 136 Features/Improvements/Tests made it into 0.12.1 release +- Around 136 Features/Improvements/Tests made it into 0.12.1 
release # 0.12.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, September 7th** [OpenMetadata 0.12.0 Release](https://blog.open-metadata.org/openmetadata-0-12-0-release-1ac059700de4) @@ -1451,6 +1602,7 @@ Major enhancements have been made to how data is extracted from Kafka and Redpan {% youtube videoId="tv3pyCLcJfQ" start="0:00" end="17:04" width="560px" height="315px" /%} ## Team Hierarchy + Prior releases supported a flat hierarchy of just Teams and Users. In 0.12, support has been added for the entire organizational hierarchy with Business Unit, Division, Department, and Groups. An organization from small to very large can now be modeled in OpenMetadata with this feature. ## Roles and Policies @@ -1483,18 +1635,18 @@ In 0.12, we’ve also streamlined the Notifications menu with two separate tabs Users can get timely updates about the metadata change events for all entities through APIs using webhooks. The webhook integration with Slack has been further improved in this release. -OpenMetadata also supports webhook integration to Microsoft Teams, just as it supports Slack. Users can choose to receive notifications for only the required entities by using event filters based on when an entity is created, updated, or deleted. +OpenMetadata also supports webhook integration to Microsoft Teams, just as it supports Slack. Users can choose to receive notifications for only the required entities by using event filters based on when an entity is created, updated, or deleted. ## Tasks In the 0.11 release, a request to add or update descriptions for data assets could be converted to a Task. In the 0.12 release, Tasks can be created based on requests to create or update tags. Also, a glossary term approval workflow can be converted to a Task. - ## Secret Management Store Interface In 0.12, we have completely revamped how that secret is stored, accessed, and by whom; by introducing a Secrets Manager Interface to communicate with any Key Management Store. 
The KMS will mediate between any OpenMetadata internal requirement and sensitive information. That way, users can choose to use the underlying database as KMS, or any external system. The OpenMetadata community has already added support for AWS Key Management Service and AWS SSM. ## Connectors + New connectors are an essential part of every release in OpenMetadata. We are introducing four new connectors in this release: - Redpanda is a Kafka API-compatible streaming data platform for developers that unifies historical and real-time data. OpenMetadata now supports Redpanda as a Messaging service, which allows users to document its topics and schemas. Refer to the Redpanda documentation for more info. @@ -1503,6 +1655,7 @@ New connectors are an essential part of every release in OpenMetadata. We are in - Apache NiFi automates the flow of data between systems. OpenMetadata now supports a NiFi connector as the third new pipeline service on this release. ## Lineage + We’ve enhanced the performance of workflows by having a separate workflow for Lineage and Usage. By using two workflows for computing specific pieces of information, we can effectively filter down the queries to extract lineage. During table usage ingestion, the tables retrieved successfully will be cached, so that there is no need to repeat the same calls multiple times as many queries would be referencing the same tables. @@ -1510,10 +1663,11 @@ Usage queries have been optimized. A result limit has been added to Usage queries. ## Global Settings -The OpenMetadata Settings dropdown menu has been transformed into a single, centralized Settings page for added convenience in viewing all the available options. The Global Settings comprises setting options for Team Members, Access based on Roles and Policies, Services, Data Quality, Collaboration, Custom Attributes, and Integrations for webhooks and bots. 
Admins can view or update settings for various services like Slack, MS Teams, Webhooks, etc from the Global Settings page. +The OpenMetadata Settings dropdown menu has been transformed into a single, centralized Settings page for added convenience in viewing all the available options. The Global Settings comprises setting options for Team Members, Access based on Roles and Policies, Services, Data Quality, Collaboration, Custom Attributes, and Integrations for webhooks and bots. Admins can view or update settings for various services like Slack, MS Teams, Webhooks, etc from the Global Settings page. ## UI/UX Improvements + The major UI UX improvements have been done around Roles and Policies and a Global Settings page. Quite a lot of tweaks have been made to the UI to improve the UX. When creating a new user or when a user is registering for the first time, the dropdown menu for Teams now displays an option to ‘Show All’ teams. Previously, we supported the display of only the first 10 teams. An option has also been provided to search and filter. @@ -1522,13 +1676,14 @@ Manage Tab has been replaced with the manage button on the UI. # 0.11.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, July 1st** [OpenMetadata 0.11.0 Release](https://blog.open-metadata.org/openmetadata-0-11-release-8b82c85636a) {% /note %} ## Data Collaboration - Tasks, Announcements, & Emojis + - Tasks have been introduced as an extension to the ability to create conversations and post replies. - Tasks can be created around descriptions for tables, pipelines, dashboards, and topics. - Users can Request a description, or even Suggest a new description and make edits to an existing description. @@ -1540,14 +1695,17 @@ Manage Tab has been replaced with the manage button on the UI. - Task owners can provide description or accept/reject suggestions and those tasks are automatically closed. ## Column Level Lineage + - Column level lineage API support has been added in the backend. 
- Supports table level and column level lineage from Snowflake, Redshift, and BigQuery. ## Custom Properties + - Now supports adding new types and extending entities when organizations need to capture custom metadata. - New types and custom fields can be added to entities either using API or in OpenMetadata UI. ## Advanced Search + - Users can search by column, schema, database, owner, tag, and service. - Users can search by multiple parameters to narrow down the search results. - Separate advanced search options are available for Tables, Topics, Dashboards, Pipelines, and ML Models. @@ -1555,11 +1713,13 @@ Manage Tab has been replaced with the manage button on the UI. - Entity specific search options are also available - table specific options include Column, Schema, and Database, pipeline specific options include Task, and dashboards specific option includes Chart. ## Glossary UI Updates + - The Glossary UI has been upgraded. - The arrangement to display the Summary, Related Terms, Synonyms, and References has been changed. - Reviewers are shown on the right panel with an option to add or remove existing reviewers. ## Profiler and Data Quality Improvements + - Seven additional data quality tests have been added as follows. - - tableColumnCountToBeBetween: Ensure the number of columns in your table stays within the expected range - - tableColumnNameToExist: Check that a specific column is in your table @@ -1573,10 +1733,12 @@ Manage Tab has been replaced with the manage button on the UI. - Developed a direct integration between Great Expectations and OpenMetadata. Now, you can add custom actions to your Great Expectations checkpoints file that will automatically ingest your data quality tests results into OpenMetadata at the end of your checkpoint file run. ## ML Models + - ML Model entities have been added to the UI. - Supports ingestion through the UI from MLflow. 
## Connectors + - Five new connectors have been added - Airbyte, Mode, AWS Data Lake, Google Cloud Data Lake, and Apache Pinot. - DBT Cloud support was added and we now extract manifest and catalog files from API. - The ingestion scheduler now supports a minute level selection. @@ -1584,6 +1746,7 @@ Manage Tab has been replaced with the manage button on the UI. - The Looker connector now fetches the ‘Usage’ and ‘Access’ metadata for Dashboards and Charts. ## UI Improvements + - The OpenMetadata UI has a new layout. - In the Activity Feeds, the options to reply to a conversation, as well as to delete can now be found on hovering over the conversation. - Users can react with Emojis on the activity feeds, conversations and replies. @@ -1592,6 +1755,7 @@ Manage Tab has been replaced with the manage button on the UI. - A tooltip has been added to display the FQN on hover in the Activity Feed header. ## Other Changes + - Admin users define Roles and associate these roles to Teams. When a user picks a Team, the Role gets automatically assigned. - An option has been added to recreate a fresh index from the data available in Elasticsearch. - A simple webhook server has been added to the metadata command to register and listen to the metadata change events. @@ -1602,7 +1766,7 @@ Manage Tab has been replaced with the manage button on the UI. # 0.10.1 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, May 17th** {% /note %} @@ -1614,7 +1778,7 @@ Manage Tab has been replaced with the manage button on the UI. # 0.10.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, April 27th** [OpenMetadata 0.10.0 Release](https://blog.open-metadata.org/openmetadata-0-10-0-release-82c4f5533c3f) @@ -1622,7 +1786,7 @@ Manage Tab has been replaced with the manage button on the UI. ## Support for Database Schema -OpenMetadata supports databases, service name databases, and tables. We’ve added Database Schema as part of the FQN. 
+OpenMetadata supports databases, service name databases, and tables. We’ve added Database Schema as part of the FQN. For each external data source, we ingest the database, as well as the tables that are contained underneath the schemas. ## Support for Hard Delete @@ -1632,39 +1796,39 @@ and ingestion. Hard deleting an entity removes the entity and all of its relatio ## Deploy Ingestion from UI -OpenMetadata has refactored the service connections to simplify the ingestion jobs from both the ingestion framework +OpenMetadata has refactored the service connections to simplify the ingestion jobs from both the ingestion framework and the UI. We now use the pydantic models automatically generated from the JSON schemas for the connection definition. The ‘Add Service’ form is automatically generated in the UI based on the JSON schema specifications for the various connectors that are supported in OpenMetadata. ## Download dbt Manifest Files from Amazon S3 or Google Cloud Storage -Previously, when ingesting the models and lineage from dbt, we passed the path of the dbt manifest and catalog files -directly into the workflow. We’ve worked on improving the quality of life of dbt. Now, we can dynamically download -these files from Amazon S3 or Google Cloud Storage. This way we can have any other process to connect to the dbt, -extract the catalog, and put it into any cloud service. We just need the path name and workflow job details from the +Previously, when ingesting the models and lineage from dbt, we passed the path of the dbt manifest and catalog files +directly into the workflow. We’ve worked on improving the quality of life of dbt. Now, we can dynamically download +these files from Amazon S3 or Google Cloud Storage. This way we can have any other process to connect to the dbt, +extract the catalog, and put it into any cloud service. We just need the path name and workflow job details from the metadata extraction to be able to ingest metadata. 
## JSON Schema based Connection Definition -Each service (database, dashboard, messaging, or pipeline service) has its own configuration specifications, with some -unique requirements for some services. Instead of the ad hoc definitions of the source module in Python for each +Each service (database, dashboard, messaging, or pipeline service) has its own configuration specifications, with some +unique requirements for some services. Instead of the ad hoc definitions of the source module in Python for each connector, we’ve worked on the full refactoring of the ingestion framework. We now use the pydantic models automatically generated from the JSON schemas for the connection definition. ## Airflow Rest APIs -The Airflow REST APIs have been refactored. With our API centric model, we are creating a custom airflow rest API +The Airflow REST APIs have been refactored. With our API centric model, we are creating a custom airflow rest API directly on top of Airflow using plugins. This passes the connection information to automatically generate all the dags and prepares handy methods to help us test the connection to the source before creating the service. ## UI Changes - The UI improvements are directed toward providing a consistent user experience. -- Hard Deletion of Entities: With the support for the hard deletion of entities, we can permanently delete tables, - topics, or services. When the entity is hard deleted, the entity and all its relationships are removed. +- Hard Deletion of Entities: With the support for the hard deletion of entities, we can permanently delete tables, + topics, or services. When the entity is hard deleted, the entity and all its relationships are removed. This generates an ‘EntityDeleted’ change event. 
-- Dynamic “Add Service” Forms: The ‘Add Service’ form is automatically generated in the UI based on the JSON +- Dynamic “Add Service” Forms: The ‘Add Service’ form is automatically generated in the UI based on the JSON schema specifications for the various connectors that are supported in OpenMetadata. - UI Support for Database Schema as part of FQN: The database schema has been introduced in the 0.10 release. All the entity pages now support Database Schema in the UI. @@ -1675,17 +1839,18 @@ and prepares handy methods to help us test the connection to the source before c - Add User: A user can be added from the Users page. ## Security Changes -- **Support Refresh Tokens for Auth0 and Okta SSO**: The JWT tokens generated by the SSO providers expire by default - in about an hour, making the user re-login often. In this release, we’ve added support for refresh tokens for Auth0 + +- **Support Refresh Tokens for Auth0 and Okta SSO**: The JWT tokens generated by the SSO providers expire by default + in about an hour, making the user re-login often. In this release, we’ve added support for refresh tokens for Auth0 and Okta SSO. The tokens are refreshed silently behind the scenes to provide an uninterrupted user experience. In future releases, we’ll continue to stabilize authentication and add refresh tokens for the other SSO providers. -- **Custom OIDC SSO**: OpenMetadata now supports integration with your custom-built OIDC SSO for authentication. +- **Custom OIDC SSO**: OpenMetadata now supports integration with your custom-built OIDC SSO for authentication. This is supported both on the front end for user authentication and on the ingestion side. - **Azure SSO**: Support has been added for Azure SSO on Airflow. 
# 0.9.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, March 10th** [OpenMetadata 0.9.0 Release](https://blog.open-metadata.org/openmetadata-0-9-0-release-8e7b93ab1882) @@ -1700,6 +1865,7 @@ and prepares handy methods to help us test the connection to the source before c - Table details - Click through on usage to see who or what services are using it, what queries are pulling from it. ## Data Quality + - Ability to create and monitor the test cases. - Data Quality Tests support with Json Schemas and APIs. - UI Integration to enable user to write tests and run them on Airflow. @@ -1709,10 +1875,11 @@ and prepares handy methods to help us test the connection to the source before c - Glossaries are a Controlled Vocabulary in an organization used to define the concepts and terminologies specific to a particular domain. - API & Schemas to support Glossary. -- UI support to add Glossary and Glossary Terms. +- UI support to add Glossary and Glossary Terms. - Support for using Glossary terms to annotate Entities and Search using Glossary Terms. ## Connectors + - Apache Iceberg - Azure SQL - Clickhouse @@ -1729,10 +1896,12 @@ and prepares handy methods to help us test the connection to the source before c - Amundsen, Import Metadata from Amundsen into OpenMetadata ## Lineage + - DataSource SQL Parsing support to extract Lineage - View Lineage support ## Pipeline + - Capture pipeline status as it happens ## Security @@ -1743,23 +1912,25 @@ and prepares handy methods to help us test the connection to the source before c # 0.8.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2022, January 22nd** [OpenMetadata 0.8.0 Release](https://blog.open-metadata.org/openmetadata-0-8-0-release-ca09bd2fbf54) {% /note %} ## Access Control Policies + - Design of Access Control Policies. - Provide Role based access control with community feedback. ## Eventing Webhook - Register webhooks to get metadata event notifications. 
-- Metadata Change Event integration into Slack and framework for integration into other services such as +- Metadata Change Event integration into Slack and framework for integration into other services such as Kafka or other Notification frameworks ## Connectors + - Delta Lake - Iceberg - PowerBI @@ -1767,25 +1938,29 @@ and prepares handy methods to help us test the connection to the source before c # 0.7.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, November 17th** [OpenMetadata 0.7.0 Release](https://blog.open-metadata.org/openmetadata-0-7-0-release-9f741b8d5089) {% /note %} ## UI - Activity Feed, Improved UX for Search + - Users will have access to Activity Feed of all the changes to the Metadata. - New and Improved UX for Search and Landing page. ## Support for Table Location + - Extract Location information from Glue, Redshift. - Show Location details on the Table Page. ## ElasticSearch Improvements + - Support SSL (including self-signed certs) enabled ElasticSearch. - New entities will be indexed into ElasticSearch directly ## Connectors + - Metabase - Apache Druid - Glue Improvements @@ -1794,88 +1969,104 @@ and prepares handy methods to help us test the connection to the source before c - Amundsen Import connector ## Other features + - Metadata Change Event integration into Slack and framework for integration into other services such as Kafka or other Notification frameworks - Delta Lake support, Databricks, Iceberg # 0.6.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, November 17th** [OpenMetadata 0.6.0 Release — Metadata Versioning, Events API, One-Click Ingestion, and more](https://blog.open-metadata.org/openmetadata-0-6-0-release-metadata-versioning-events-api-one-click-ingestion-and-more-4394c4f08e0b) {% /note %} ## Metadata Versioning and Eventing Framework + - Capture changes to Entity Metadata from source and user interactions as versions. 
- Versioned changes will be published as events for clients to consume to take actions on. ## Data Reliability + - Improvements to Data Reliability library. - Capture custom measurements through user provided SQL. ## Airflow APIs + - Airflow APIs to deploy DAGS and manage them. - UI integration to deploy ingestion workflows. ## Connectors + - AWS Glue - dbt - MariaDB # 0.5.0 Release -{% note noteType="Tip" %} +{% note noteType="Tip" %} **2021, October 19th** [OpenMetadata 0.5.0 Release is here — Lineage, Pipelines, Complex Types, Data Profiler and so much more](https://blog.open-metadata.org/openmetadata-0-5-0-1144a4000644) {% /note %} ## Support for Lineage + - Lineage related schemas and APIs. - Lineage metadata integration from AirFlow for tables. - UI changes to show lineage information to the users. ## Data Reliability + - Improvements to Data Profiler. - UI integration with Data Profiler to show how the table profile looks over the period of time. ## Complex Types + - Support complex types such as Struct, Array with nested fields. - UI support to add expand complex types and tag, add description for nested fields. ## Connectors + - Trino - Redash ## Other features + - Pipeline Entities are supported. - Integration with Airflow to extract Pipeline details. # 0.4.0 Release -{% note noteType="Tip" %} + +{% note noteType="Tip" %} **2021, September 20th** [OpenMetadata 0.4.0 Release — Dashboards, Topics, Data Reliability](https://blog.open-metadata.org/openmetadata-0-4-0-release-dashboards-topics-data-reliability-14e8672ae0f5) {% /note %} ## Support for Kafka (and Pulsar WIP) + - Support for Message Service and Topic entities in schemas, APIs, and UI. - Kafka connector and ingestion support for Confluent Schema Registry. ## Support for Dashboards + - Support for Dashboard services, Dashboards, and Charts entities in schemas, APIs, and UI. - Looker, Superset, Tableau connector, and ingestion support. 
## User Interface + - Sort search results based on Usage, Relevance, and Last updated time. - Search string highlighted in search results. - Support for Kafka and Dashboards from Looker, Superset, and Tableau. ## Other features + - Pluggable SSO integration - Auth0 support. - Support for Presto. ## Work in progress + - Salesforce CRM connector. - Data profiler to profile tables in ingestion framework and show it table details page. diff --git a/openmetadata-docs/images/connectors/sap-erp.webp b/openmetadata-docs/images/connectors/sap-erp.webp new file mode 100644 index 000000000000..180eeaa9dace Binary files /dev/null and b/openmetadata-docs/images/connectors/sap-erp.webp differ diff --git a/openmetadata-docs/images/connectors/sap-hana.webp b/openmetadata-docs/images/connectors/sap-hana.webp index e15d47f7eac9..88076387631b 100644 Binary files a/openmetadata-docs/images/connectors/sap-hana.webp and b/openmetadata-docs/images/connectors/sap-hana.webp differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.1.png b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.1.png new file mode 100644 index 000000000000..c2852cc6defd Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.2.png b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.2.png new file mode 100644 index 000000000000..f80bf3452f4b Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.png b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.png new file mode 100644 index 000000000000..0d52c6e415de Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/connector1.png differ diff --git 
a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/delete1.png b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/delete1.png index 835024508702..21370c9c76a3 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/delete1.png and b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/delete1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/snowflake2.png b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/snowflake2.png index b0b312150e6c..0fc60e594e3d 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/snowflake2.png and b/openmetadata-docs/images/v1.5/how-to-guides/admin-guide/snowflake2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto1.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto1.png index 81c70080b2b2..9a4964881f87 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto1.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto2.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto2.png index 862e7a44410a..1db4a30df284 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto2.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto3.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto3.png index a6c1cf5d83cd..1550fcf39ea4 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/auto3.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/auto3.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/automation-5.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/automation-5.png index 7f18a8a25134..c5d359eec64f 100644 Binary files 
a/openmetadata-docs/images/v1.5/how-to-guides/governance/automation-5.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/automation-5.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/banking.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/banking.png index f48d463a01d4..5c126dc8aeaa 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/banking.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/banking.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/data-products.gif b/openmetadata-docs/images/v1.5/how-to-guides/governance/data-products.gif index 4dfb1daac7c1..5c45215d48ef 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/data-products.gif and b/openmetadata-docs/images/v1.5/how-to-guides/governance/data-products.gif differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/domains.gif b/openmetadata-docs/images/v1.5/how-to-guides/governance/domains.gif index 159aba9821ab..f202538ddff5 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/domains.gif and b/openmetadata-docs/images/v1.5/how-to-guides/governance/domains.gif differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary-term.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary-term.png index 949cb8380d0e..75a977c53578 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary-term.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary-term.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary4.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary4.png index 5f681c65bc63..a5c5fb4017ab 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary4.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/glossary4.png differ diff --git 
a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag1.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag1.png index 10de22ee236b..0d56641b2ff8 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag1.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag2.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag2.png index 8add0addb478..d276f3322f49 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag2.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag3.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag3.png index f6efaeb875f6..86ab8d5641f6 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag3.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag3.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag4.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag4.png index e3b7091eb9dc..534b17804a3c 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag4.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag4.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag7.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag7.png index 885a50007a74..681bd9575e57 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/tag7.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/tag7.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/term1.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/term1.png index 5c879cebcaf6..8db5f23bda2b 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/term1.png and 
b/openmetadata-docs/images/v1.5/how-to-guides/governance/term1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/term2.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/term2.png index b0ac6b724102..36d764ef27ea 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/term2.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/term2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/term3.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/term3.png index 84fde40d7fc3..b5df3531141b 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/term3.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/term3.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/governance/version.png b/openmetadata-docs/images/v1.5/how-to-guides/governance/version.png index d4aafb3964dd..3380bfb28281 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/governance/version.png and b/openmetadata-docs/images/v1.5/how-to-guides/governance/version.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights1.png b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights1.png index 7a072c950965..46636e6dd163 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights1.png and b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.1.png b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.1.png new file mode 100644 index 000000000000..b2a636d9c5fd Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.2.png b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.2.png new file mode 100644 index 000000000000..399607fdbb37 Binary files 
/dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.png b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.png index 992755bc0d98..b2f9e6081f96 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.png and b/openmetadata-docs/images/v1.5/how-to-guides/insights/insights2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/l1.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/l1.png index dc1012b269e0..c0872d735dd7 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/l1.png and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/l1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/lineage1.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/lineage1.png index b32c522fab88..cdb2c2c2f7f3 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/lineage1.png and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/lineage1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.1.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.1.png new file mode 100644 index 000000000000..ea47a001be28 Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.2.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.2.png new file mode 100644 index 000000000000..0f4c93282602 Binary files /dev/null and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.png index a9ff044b903b..a993b5c5a5c8 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.png and 
b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf1.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf2.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf2.png index ef147f42fcf9..f01e1cfba3f4 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf2.png and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf2.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf3.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf3.png index 8302d7ee0271..aef799c88cd9 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf3.png and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf3.png differ diff --git a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf4.png b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf4.png index 67a3d191e63e..1303a391e3ab 100644 Binary files a/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf4.png and b/openmetadata-docs/images/v1.5/how-to-guides/lineage/wkf4.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/add-new-service.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-new-service.png new file mode 100644 index 000000000000..e307896ee2d0 Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-new-service.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/add-reference.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-reference.png new file mode 100644 index 000000000000..e627633810bc Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-reference.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/add-service-as-endpoint.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-service-as-endpoint.png new file mode 100644 index 000000000000..0f41f446dbc4 Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/add-service-as-endpoint.png 
differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/create-project.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/create-project.png new file mode 100644 index 000000000000..f842c37d5b3d Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/create-project.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/data-definition-object.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/data-definition-object.png new file mode 100644 index 000000000000..8fbd8446177c Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/data-definition-object.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/register-odata-service.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/register-odata-service.png new file mode 100644 index 000000000000..1982ce0eb764 Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/register-odata-service.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/select-service.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/select-service.png new file mode 100644 index 000000000000..dc8440250c5a Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/select-service.png differ diff --git a/openmetadata-docs/images/v1.6/connectors/sap-erp/service-connection.png b/openmetadata-docs/images/v1.6/connectors/sap-erp/service-connection.png new file mode 100644 index 000000000000..3f338a572012 Binary files /dev/null and b/openmetadata-docs/images/v1.6/connectors/sap-erp/service-connection.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.1.png b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.1.png new file mode 100644 index 000000000000..c2852cc6defd Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.1.png differ diff --git 
a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.2.png b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.2.png new file mode 100644 index 000000000000..f80bf3452f4b Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.png b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.png new file mode 100644 index 000000000000..0d52c6e415de Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/connector1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/delete1.png b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/delete1.png index 835024508702..21370c9c76a3 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/delete1.png and b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/delete1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/snowflake2.png b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/snowflake2.png index b0b312150e6c..0fc60e594e3d 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/snowflake2.png and b/openmetadata-docs/images/v1.6/how-to-guides/admin-guide/snowflake2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto1.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto1.png index 81c70080b2b2..9a4964881f87 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto1.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto2.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto2.png index 862e7a44410a..1db4a30df284 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto2.png and 
b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto3.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto3.png index a6c1cf5d83cd..1550fcf39ea4 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/auto3.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/auto3.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/automation-5.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/automation-5.png index 7f18a8a25134..c5d359eec64f 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/automation-5.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/automation-5.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/banking.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/banking.png index f48d463a01d4..5c126dc8aeaa 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/banking.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/banking.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/data-products.gif b/openmetadata-docs/images/v1.6/how-to-guides/governance/data-products.gif index 4dfb1daac7c1..5c45215d48ef 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/data-products.gif and b/openmetadata-docs/images/v1.6/how-to-guides/governance/data-products.gif differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/domains.gif b/openmetadata-docs/images/v1.6/how-to-guides/governance/domains.gif index 159aba9821ab..f202538ddff5 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/domains.gif and b/openmetadata-docs/images/v1.6/how-to-guides/governance/domains.gif differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary-term.png 
b/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary-term.png index 949cb8380d0e..75a977c53578 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary-term.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary-term.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary4.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary4.png index 5f681c65bc63..a5c5fb4017ab 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary4.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/glossary4.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag1.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag1.png index 10de22ee236b..0d56641b2ff8 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag1.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag2.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag2.png index 8add0addb478..d276f3322f49 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag2.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag3.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag3.png index f6efaeb875f6..86ab8d5641f6 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag3.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag3.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag4.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag4.png index e3b7091eb9dc..534b17804a3c 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag4.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag4.png 
differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag7.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag7.png index 885a50007a74..681bd9575e57 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/tag7.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/tag7.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/term1.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/term1.png index 5c879cebcaf6..8db5f23bda2b 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/term1.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/term1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/term2.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/term2.png index b0ac6b724102..36d764ef27ea 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/term2.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/term2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/term3.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/term3.png index 84fde40d7fc3..b5df3531141b 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/term3.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/term3.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/governance/version.png b/openmetadata-docs/images/v1.6/how-to-guides/governance/version.png index d4aafb3964dd..3380bfb28281 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/governance/version.png and b/openmetadata-docs/images/v1.6/how-to-guides/governance/version.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights1.png b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights1.png index 7a072c950965..46636e6dd163 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights1.png 
and b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.1.png b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.1.png new file mode 100644 index 000000000000..b2a636d9c5fd Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.2.png b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.2.png new file mode 100644 index 000000000000..399607fdbb37 Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.png b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.png index 992755bc0d98..b2f9e6081f96 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.png and b/openmetadata-docs/images/v1.6/how-to-guides/insights/insights2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/l1.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/l1.png index dc1012b269e0..c0872d735dd7 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/l1.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/l1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/lineage1.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/lineage1.png index b32c522fab88..cdb2c2c2f7f3 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/lineage1.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/lineage1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.1.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.1.png new file mode 100644 index 000000000000..ea47a001be28 Binary files /dev/null and 
b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.2.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.2.png new file mode 100644 index 000000000000..0f4c93282602 Binary files /dev/null and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.png index a9ff044b903b..a993b5c5a5c8 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf1.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf2.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf2.png index ef147f42fcf9..f01e1cfba3f4 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf2.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf2.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf3.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf3.png index 8302d7ee0271..aef799c88cd9 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf3.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf3.png differ diff --git a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf4.png b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf4.png index 67a3d191e63e..1303a391e3ab 100644 Binary files a/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf4.png and b/openmetadata-docs/images/v1.6/how-to-guides/lineage/wkf4.png differ diff --git a/openmetadata-service/pom.xml b/openmetadata-service/pom.xml index 09892d997969..1a7ca3455799 100644 --- a/openmetadata-service/pom.xml +++ b/openmetadata-service/pom.xml @@ -25,6 +25,8 @@ 2.3.4 2.5.0-rc2 5.7.0 + 3.6.0 + 3.3.1 @@ -893,6 +895,7 @@ org.apache.maven.plugins maven-source-plugin + 
${maven-source-plugin.version} org.apache.maven.plugins diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/Entity.java b/openmetadata-service/src/main/java/org/openmetadata/service/Entity.java index 6600a380fef1..498cc407e3c2 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/Entity.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/Entity.java @@ -122,6 +122,7 @@ public final class Entity { public static final String FIELD_STYLE = "style"; public static final String FIELD_LIFE_CYCLE = "lifeCycle"; + public static final String FIELD_CERTIFICATION = "certification"; public static final String FIELD_DISABLED = "disabled"; @@ -242,6 +243,7 @@ public final class Entity { public static final String DOCUMENT = "document"; // ServiceType - Service Entity name map static final Map SERVICE_TYPE_ENTITY_MAP = new EnumMap<>(ServiceType.class); + public static final List PARENT_ENTITY_TYPES = new ArrayList<>(); static { SERVICE_TYPE_ENTITY_MAP.put(ServiceType.DATABASE, DATABASE_SERVICE); @@ -253,6 +255,25 @@ public final class Entity { SERVICE_TYPE_ENTITY_MAP.put(ServiceType.STORAGE, STORAGE_SERVICE); SERVICE_TYPE_ENTITY_MAP.put(ServiceType.SEARCH, SEARCH_SERVICE); SERVICE_TYPE_ENTITY_MAP.put(ServiceType.API, API_SERVICE); + PARENT_ENTITY_TYPES.addAll( + listOf( + DATABASE_SERVICE, + DASHBOARD_SERVICE, + MESSAGING_SERVICE, + MLMODEL_SERVICE, + PIPELINE_SERVICE, + API_SERVICE, + API_COLLCECTION, + STORAGE_SERVICE, + METADATA_SERVICE, + SEARCH_SERVICE, + DATABASE, + DATABASE_SCHEMA, + CLASSIFICATION, + GLOSSARY, + DOMAIN, + TEST_SUITE, + TEAM)); } private Entity() {} diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AbstractEventConsumer.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AbstractEventConsumer.java index 67aff3e3d663..9f5085b57aa7 100644 --- 
a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AbstractEventConsumer.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AbstractEventConsumer.java @@ -53,6 +53,7 @@ public abstract class AbstractEventConsumer public static final String OFFSET_EXTENSION = "eventSubscription.Offset"; public static final String METRICS_EXTENSION = "eventSubscription.metrics"; public static final String FAILED_EVENT_EXTENSION = "eventSubscription.failedEvent"; + private long offset = -1; private AlertMetrics alertMetrics; @@ -77,8 +78,13 @@ protected void doInit(JobExecutionContext context) { // To be implemented by the Subclass if needed } + public enum FailureTowards { + SUBSCRIBER, + PUBLISHER + } + @Override - public void handleFailedEvent(EventPublisherException ex) { + public void handleFailedEvent(EventPublisherException ex, boolean errorOnSub) { UUID failingSubscriptionId = ex.getChangeEventWithSubscription().getLeft(); ChangeEvent changeEvent = ex.getChangeEventWithSubscription().getRight(); LOG.debug( @@ -87,6 +93,8 @@ public void handleFailedEvent(EventPublisherException ex) { failingSubscriptionId, changeEvent); + FailureTowards source = errorOnSub ? 
FailureTowards.SUBSCRIBER : FailureTowards.PUBLISHER; + Entity.getCollectionDAO() .eventSubscriptionDAO() .upsertFailedEvent( @@ -97,7 +105,9 @@ public void handleFailedEvent(EventPublisherException ex) { .withFailingSubscriptionId(failingSubscriptionId) .withChangeEvent(changeEvent) .withRetriesLeft(eventSubscription.getRetries()) - .withTimestamp(System.currentTimeMillis()))); + .withReason(ex.getMessage()) + .withTimestamp(System.currentTimeMillis())), + source.toString()); } private long loadInitialOffset(JobExecutionContext context) { @@ -164,7 +174,7 @@ public void publishEvents(Map> events) { alertMetrics.withSuccessEvents(alertMetrics.getSuccessEvents() + 1); } catch (EventPublisherException e) { alertMetrics.withFailedEvents(alertMetrics.getFailedEvents() + 1); - handleFailedEvent(e); + handleFailedEvent(e, false); } } } @@ -176,6 +186,7 @@ public void commit(JobExecutionContext jobExecutionContext) { // Upsert Offset EventSubscriptionOffset eventSubscriptionOffset = new EventSubscriptionOffset().withOffset(offset).withTimestamp(currentTime); + Entity.getCollectionDAO() .eventSubscriptionDAO() .upsertSubscriberExtension( @@ -183,6 +194,7 @@ public void commit(JobExecutionContext jobExecutionContext) { OFFSET_EXTENSION, "eventSubscriptionOffset", JsonUtils.pojoToJson(eventSubscriptionOffset)); + jobExecutionContext .getJobDetail() .getJobDataMap() @@ -195,6 +207,7 @@ public void commit(JobExecutionContext jobExecutionContext) { .withFailedEvents(alertMetrics.getFailedEvents()) .withSuccessEvents(alertMetrics.getSuccessEvents()) .withTimestamp(currentTime); + Entity.getCollectionDAO() .eventSubscriptionDAO() .upsertSubscriberExtension( @@ -202,6 +215,7 @@ public void commit(JobExecutionContext jobExecutionContext) { METRICS_EXTENSION, "alertMetrics", JsonUtils.pojoToJson(metrics)); + jobExecutionContext.getJobDetail().getJobDataMap().put(METRICS_EXTENSION, alertMetrics); // Populate the Destination map diff --git 
a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AlertPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AlertPublisher.java index 17d629759b91..ccd6017dd361 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AlertPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/AlertPublisher.java @@ -13,7 +13,11 @@ public void sendAlert(UUID receiverId, ChangeEvent event) throws EventPublisherE if (destinationMap.containsKey(receiverId)) { Destination destination = destinationMap.get(receiverId); if (Boolean.TRUE.equals(destination.getEnabled())) { - destination.sendMessage(event); + try { + destination.sendMessage(event); + } catch (EventPublisherException ex) { + handleFailedEvent(ex, true); + } } else { LOG.debug( "Event Subscription:{} Skipping sending message since, disabled subscription with Id: {}", diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/Consumer.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/Consumer.java index 0c61ed205cc4..9f1c8482062a 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/Consumer.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/Consumer.java @@ -26,7 +26,7 @@ public interface Consumer { void publishEvents(Map> events); - void handleFailedEvent(EventPublisherException e); + void handleFailedEvent(EventPublisherException e, boolean errorOnSub); void commit(JobExecutionContext jobExecutionContext); } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/email/EmailPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/email/EmailPublisher.java index cc31a5c0ff37..b1789fc13d03 100644 
--- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/email/EmailPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/email/EmailPublisher.java @@ -73,7 +73,9 @@ public void sendMessage(ChangeEvent event) throws EventPublisherException { String message = CatalogExceptionMessage.eventPublisherFailedToPublish(EMAIL, event, e.getMessage()); LOG.error(message); - throw new EventPublisherException(message, Pair.of(subscriptionDestination.getId(), event)); + throw new EventPublisherException( + CatalogExceptionMessage.eventPublisherFailedToPublish(EMAIL, e.getMessage()), + Pair.of(subscriptionDestination.getId(), event)); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/feed/ActivityFeedPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/feed/ActivityFeedPublisher.java index e7226cb6b5ea..218c565d6258 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/feed/ActivityFeedPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/feed/ActivityFeedPublisher.java @@ -73,7 +73,8 @@ public void sendMessage(ChangeEvent changeEvent) throws EventPublisherException ACTIVITY_FEED, changeEvent, ex.getMessage()); LOG.error(message); throw new EventPublisherException( - message, Pair.of(subscriptionDestination.getId(), changeEvent)); + CatalogExceptionMessage.eventPublisherFailedToPublish(ACTIVITY_FEED, ex.getMessage()), + Pair.of(subscriptionDestination.getId(), changeEvent)); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/gchat/GChatPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/gchat/GChatPublisher.java index 95428bb28fba..7d7b457fc185 100644 --- 
a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/gchat/GChatPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/gchat/GChatPublisher.java @@ -90,7 +90,9 @@ public void sendMessage(ChangeEvent event) throws EventPublisherException { String message = CatalogExceptionMessage.eventPublisherFailedToPublish(G_CHAT, event, e.getMessage()); LOG.error(message); - throw new EventPublisherException(message, Pair.of(subscriptionDestination.getId(), event)); + throw new EventPublisherException( + CatalogExceptionMessage.eventPublisherFailedToPublish(G_CHAT, e.getMessage()), + Pair.of(subscriptionDestination.getId(), event)); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/generic/GenericPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/generic/GenericPublisher.java index 3defa6b7a1f3..ec005aff60b8 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/generic/GenericPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/generic/GenericPublisher.java @@ -125,7 +125,9 @@ private void handleException(long attemptTime, ChangeEvent event, Exception ex) String message = CatalogExceptionMessage.eventPublisherFailedToPublish(WEBHOOK, event, ex.getMessage()); LOG.error(message); - throw new EventPublisherException(message, Pair.of(subscriptionDestination.getId(), event)); + throw new EventPublisherException( + CatalogExceptionMessage.eventPublisherFailedToPublish(WEBHOOK, ex.getMessage()), + Pair.of(subscriptionDestination.getId(), event)); } private void handleException(long attemptTime, Exception ex) throws EventPublisherException { diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/msteams/MSTeamsPublisher.java 
b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/msteams/MSTeamsPublisher.java index 2ec37c01d197..6e7973ab30d2 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/msteams/MSTeamsPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/msteams/MSTeamsPublisher.java @@ -100,7 +100,9 @@ public void sendMessage(ChangeEvent event) throws EventPublisherException { String message = CatalogExceptionMessage.eventPublisherFailedToPublish(MS_TEAMS, event, e.getMessage()); LOG.error(message); - throw new EventPublisherException(message, Pair.of(subscriptionDestination.getId(), event)); + throw new EventPublisherException( + CatalogExceptionMessage.eventPublisherFailedToPublish(MS_TEAMS, e.getMessage()), + Pair.of(subscriptionDestination.getId(), event)); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/slack/SlackEventPublisher.java b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/slack/SlackEventPublisher.java index 034c7fd82bab..553b8b4cb992 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/slack/SlackEventPublisher.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/apps/bundles/changeEvent/slack/SlackEventPublisher.java @@ -97,7 +97,9 @@ public void sendMessage(ChangeEvent event) throws EventPublisherException { String message = CatalogExceptionMessage.eventPublisherFailedToPublish(SLACK, event, e.getMessage()); LOG.error(message); - throw new EventPublisherException(message, Pair.of(subscriptionDestination.getId(), event)); + throw new EventPublisherException( + CatalogExceptionMessage.eventPublisherFailedToPublish(SLACK, e.getMessage()), + Pair.of(subscriptionDestination.getId(), event)); } } diff --git 
a/openmetadata-service/src/main/java/org/openmetadata/service/events/scheduled/EventSubscriptionScheduler.java b/openmetadata-service/src/main/java/org/openmetadata/service/events/scheduled/EventSubscriptionScheduler.java index 3d34bcd3393d..68e3cbde3ec3 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/events/scheduled/EventSubscriptionScheduler.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/events/scheduled/EventSubscriptionScheduler.java @@ -17,19 +17,27 @@ import static org.openmetadata.service.apps.bundles.changeEvent.AbstractEventConsumer.ALERT_OFFSET_KEY; import static org.openmetadata.service.events.subscription.AlertUtil.getStartingOffset; +import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.UUID; +import java.util.stream.Collectors; import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.jdbi.v3.sqlobject.transaction.Transaction; +import org.openmetadata.common.utils.CommonUtil; import org.openmetadata.schema.EntityInterface; +import org.openmetadata.schema.api.events.EventSubscriptionDiagnosticInfo; import org.openmetadata.schema.entity.events.EventSubscription; import org.openmetadata.schema.entity.events.EventSubscriptionOffset; +import org.openmetadata.schema.entity.events.FailedEventResponse; import org.openmetadata.schema.entity.events.SubscriptionDestination; import org.openmetadata.schema.entity.events.SubscriptionStatus; +import org.openmetadata.schema.type.ChangeEvent; import org.openmetadata.service.Entity; import org.openmetadata.service.apps.bundles.changeEvent.AlertPublisher; import org.openmetadata.service.jdbi3.EntityRepository; +import org.openmetadata.service.util.JsonUtils; import org.quartz.JobBuilder; import org.quartz.JobDataMap; import org.quartz.JobDetail; @@ -156,54 +164,184 @@ public void deleteEventSubscriptionPublisher(EventSubscription deletedEntity) } public SubscriptionStatus getStatusForEventSubscription(UUID 
subscriptionId, UUID destinationId) { - EventSubscription eventSubscription = getEventSubscriptionFromScheduledJob(subscriptionId); - if (eventSubscription == null) { - EntityRepository subscriptionRepository = - Entity.getEntityRepository(Entity.EVENT_SUBSCRIPTION); - EventSubscription subscription = - (EventSubscription) - subscriptionRepository.get( - null, subscriptionId, subscriptionRepository.getFields("id")); - if (subscription != null && (Boolean.FALSE.equals(subscription.getEnabled()))) { - return new SubscriptionStatus().withStatus(SubscriptionStatus.Status.DISABLED); - } + Optional eventSubscriptionOpt = + getEventSubscriptionFromScheduledJob(subscriptionId); + + if (eventSubscriptionOpt.isPresent()) { + return eventSubscriptionOpt.get().getDestinations().stream() + .filter(destination -> destination.getId().equals(destinationId)) + .map(SubscriptionDestination::getStatusDetails) + .findFirst() + .orElse(null); + } + + EntityRepository subscriptionRepository = + Entity.getEntityRepository(Entity.EVENT_SUBSCRIPTION); + + // If the event subscription was not found in the scheduled job, check the repository + Optional subscriptionOpt = + Optional.ofNullable( + (EventSubscription) + subscriptionRepository.get( + null, subscriptionId, subscriptionRepository.getFields("id"))); + + return subscriptionOpt + .filter(subscription -> Boolean.FALSE.equals(subscription.getEnabled())) + .map( + subscription -> new SubscriptionStatus().withStatus(SubscriptionStatus.Status.DISABLED)) + .orElse(null); + } + + public List listAlertDestinations(UUID subscriptionId) { + Optional eventSubscriptionOpt = + getEventSubscriptionFromScheduledJob(subscriptionId); + + // If the EventSubscription is not found in the scheduled job, retrieve it from the repository + EventSubscription eventSubscription = + eventSubscriptionOpt.orElseGet( + () -> { + EntityRepository subscriptionRepository = + Entity.getEntityRepository(Entity.EVENT_SUBSCRIPTION); + + return (EventSubscription) + 
subscriptionRepository.get( + null, + subscriptionId, + subscriptionRepository.getFields("id,destinations,enabled")); + }); + + if (eventSubscription != null && Boolean.FALSE.equals(eventSubscription.getEnabled())) { + return Collections.emptyList(); + } + + return eventSubscription.getDestinations(); + } + + public EventSubscriptionDiagnosticInfo getEventSubscriptionDiagnosticInfo( + UUID subscriptionId, int limit) { + boolean isAllEventsPublished = checkIfPublisherPublishedAllEvents(subscriptionId); + EventSubscriptionOffset latestOffset = getLatestOffset(); + + long currentOffset = + getEventSubscriptionOffset(subscriptionId) + .map(EventSubscriptionOffset::getOffset) + .orElse(0L); + + long unpublishedEventCount = getUnpublishedEventCount(subscriptionId); + List unprocessedEvents = + Optional.ofNullable(getUnpublishedEvents(subscriptionId, limit)) + .orElse(Collections.emptyList()); + + return new EventSubscriptionDiagnosticInfo() + .withLatestOffset(latestOffset.getOffset()) + .withCurrentOffset(currentOffset) + .withHasProcessedAllEvents(isAllEventsPublished) + .withUnprocessedEventsCount(unpublishedEventCount) + .withUnprocessedEventsList(unprocessedEvents); + } + + public static EventSubscriptionOffset getLatestOffset() { + return new EventSubscriptionOffset() + .withOffset(Entity.getCollectionDAO().changeEventDAO().getLatestOffset()); + } + + public boolean checkIfPublisherPublishedAllEvents(UUID subscriptionID) { + long countOfEvents = Entity.getCollectionDAO().changeEventDAO().getLatestOffset(); + + return getEventSubscriptionOffset(subscriptionID) + .map(offset -> offset.getOffset() == countOfEvents) + .orElse(false); + } + + public long getUnpublishedEventCount(UUID subscriptionID) { + long countOfEvents = Entity.getCollectionDAO().changeEventDAO().getLatestOffset(); + + return getEventSubscriptionOffset(subscriptionID) + .map(offset -> Math.abs(countOfEvents - offset.getOffset())) + .orElse(countOfEvents); + } + + public List getUnpublishedEvents(UUID 
subscriptionId, int limit) { + long offset = + getEventSubscriptionOffset(subscriptionId) + .map(EventSubscriptionOffset::getOffset) + .orElse(Entity.getCollectionDAO().changeEventDAO().getLatestOffset()); + + List unprocessedEventJsonList = + Entity.getCollectionDAO().changeEventDAO().listUnprocessedEvents(offset, limit); + + return unprocessedEventJsonList.stream() + .map(eventJson -> JsonUtils.readValue(eventJson, ChangeEvent.class)) + .collect(Collectors.toList()); + } + + public List getFailedEventsByIdAndSource( + UUID subscriptionId, String source, int limit) { + if (CommonUtil.nullOrEmpty(source)) { + return Entity.getCollectionDAO() + .changeEventDAO() + .listFailedEventsById(subscriptionId.toString(), limit); } else { - List subscriptions = - eventSubscription.getDestinations().stream() - .filter(sub -> sub.getId().equals(destinationId)) - .toList(); - if (subscriptions.size() == 1) { - // We have unique Ids per destination - return subscriptions.get(0).getStatusDetails(); - } + return Entity.getCollectionDAO() + .changeEventDAO() + .listFailedEventsByIdAndSource(subscriptionId.toString(), source, limit); + } + } + + public List getFailedEventsById(UUID subscriptionId, int limit) { + return Entity.getCollectionDAO() + .changeEventDAO() + .listFailedEventsById(subscriptionId.toString(), limit); + } + + public List getAllFailedEvents(String source, int limit) { + if (CommonUtil.nullOrEmpty(source)) { + return Entity.getCollectionDAO().changeEventDAO().listAllFailedEvents(limit); + } else { + return Entity.getCollectionDAO().changeEventDAO().listAllFailedEventsBySource(source, limit); } - return null; } - public EventSubscription getEventSubscriptionFromScheduledJob(UUID id) { + public List getSuccessfullySentChangeEventsForAlert(UUID id, int limit) { + Optional eventSubscriptionOffset = getEventSubscriptionOffset(id); + + return eventSubscriptionOffset + .map( + offset -> { + List jsonEvents = + Entity.getCollectionDAO() + .changeEventDAO() + 
.listChangeEventsBeforeOffset(limit, offset.getOffset()); + + return jsonEvents.stream() + .map(json -> JsonUtils.readValue(json, ChangeEvent.class)) + .collect(Collectors.toList()); + }) + .orElse(Collections.emptyList()); + } + + public Optional getEventSubscriptionFromScheduledJob(UUID id) { try { JobDetail jobDetail = alertsScheduler.getJobDetail(new JobKey(id.toString(), ALERT_JOB_GROUP)); - if (jobDetail != null) { - return ((EventSubscription) jobDetail.getJobDataMap().get(ALERT_INFO_KEY)); - } + + return Optional.ofNullable(jobDetail) + .map(detail -> (EventSubscription) detail.getJobDataMap().get(ALERT_INFO_KEY)); + } catch (SchedulerException ex) { - LOG.error("Failed to get Event Subscription from Job, Subscription Id : {}", id); + LOG.error("Failed to get Event Subscription from Job, Subscription Id : {}", id, ex); } - return null; + + return Optional.empty(); } - public boolean checkIfPublisherPublishedAllEvents(UUID subscriptionID) { - long countOfEvents = Entity.getCollectionDAO().changeEventDAO().getLatestOffset(); + public Optional getEventSubscriptionOffset(UUID subscriptionID) { try { JobDetail jobDetail = alertsScheduler.getJobDetail(new JobKey(subscriptionID.toString(), ALERT_JOB_GROUP)); if (jobDetail != null) { - EventSubscriptionOffset offset = - ((EventSubscriptionOffset) jobDetail.getJobDataMap().get(ALERT_OFFSET_KEY)); - if (offset != null) { - return offset.getOffset() == countOfEvents; - } + return Optional.ofNullable( + (EventSubscriptionOffset) jobDetail.getJobDataMap().get(ALERT_OFFSET_KEY)); } } catch (Exception ex) { LOG.error( @@ -211,7 +349,11 @@ public boolean checkIfPublisherPublishedAllEvents(UUID subscriptionID) { subscriptionID.toString(), ex); } - return false; + return Optional.empty(); + } + + public boolean doesRecordExist(UUID id) { + return Entity.getCollectionDAO().changeEventDAO().recordExists(id.toString()) > 0; } public static void shutDown() throws SchedulerException { diff --git 
a/openmetadata-service/src/main/java/org/openmetadata/service/exception/CatalogExceptionMessage.java b/openmetadata-service/src/main/java/org/openmetadata/service/exception/CatalogExceptionMessage.java index b52680c2a1a7..2219c0e243cc 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/exception/CatalogExceptionMessage.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/exception/CatalogExceptionMessage.java @@ -346,7 +346,8 @@ public static String eventPublisherFailedToPublish( public static String eventPublisherFailedToPublish( SubscriptionDestination.SubscriptionType type, String message) { - return String.format("Failed to publish event %s due to %s ", type.value(), message); + return String.format( + "Failed to publish event of destination type %s due to %s ", type.value(), message); } public static String invalidTaskField(EntityLink entityLink, TaskType taskType) { diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APICollectionRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APICollectionRepository.java index 8c6551c0988a..d4cbda5233af 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APICollectionRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APICollectionRepository.java @@ -39,6 +39,7 @@ public APICollectionRepository() { "", ""); supportsSearch = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APIServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APIServiceRepository.java index 5d6f9f619937..6ea4a0f692ca 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APIServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/APIServiceRepository.java @@ -16,5 +16,6 @@ public APIServiceRepository() { "", ServiceType.API); 
supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/ClassificationRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/ClassificationRepository.java index 2eea460d72ea..03635db04a08 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/ClassificationRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/ClassificationRepository.java @@ -54,6 +54,7 @@ public ClassificationRepository() { quoteFqn = true; supportsSearch = true; renameAllowed = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/CollectionDAO.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/CollectionDAO.java index f6051c5010d1..be5416509113 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/CollectionDAO.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/CollectionDAO.java @@ -59,12 +59,14 @@ import org.openmetadata.schema.analytics.WebAnalyticEvent; import org.openmetadata.schema.api.configuration.LoginConfiguration; import org.openmetadata.schema.api.configuration.profiler.ProfilerConfiguration; -import org.openmetadata.schema.api.searcg.SearchSettings; +import org.openmetadata.schema.api.lineage.LineageSettings; +import org.openmetadata.schema.api.search.SearchSettings; import org.openmetadata.schema.auth.EmailVerificationToken; import org.openmetadata.schema.auth.PasswordResetToken; import org.openmetadata.schema.auth.PersonalAccessToken; import org.openmetadata.schema.auth.RefreshToken; import org.openmetadata.schema.auth.TokenType; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.dataInsight.DataInsightChart; import org.openmetadata.schema.dataInsight.custom.DataInsightCustomChart; import org.openmetadata.schema.dataInsight.kpi.Kpi; @@ -99,6 
+101,8 @@ import org.openmetadata.schema.entity.domains.DataProduct; import org.openmetadata.schema.entity.domains.Domain; import org.openmetadata.schema.entity.events.EventSubscription; +import org.openmetadata.schema.entity.events.FailedEvent; +import org.openmetadata.schema.entity.events.FailedEventResponse; import org.openmetadata.schema.entity.policies.Policy; import org.openmetadata.schema.entity.services.ApiService; import org.openmetadata.schema.entity.services.DashboardService; @@ -2088,18 +2092,22 @@ void upsertSubscriberExtension( @ConnectionAwareSqlUpdate( value = - "INSERT INTO consumers_dlq(id, extension, json) " - + "VALUES (:id, :extension, :json)" - + "ON DUPLICATE KEY UPDATE json = :json", + "INSERT INTO consumers_dlq(id, extension, json, source) " + + "VALUES (:id, :extension, :json, :source) " + + "ON DUPLICATE KEY UPDATE json = :json, source = :source", connectionType = MYSQL) @ConnectionAwareSqlUpdate( value = - "INSERT INTO consumers_dlq(id, extension, json) " - + "VALUES (:id, :extension, (:json :: jsonb)) ON CONFLICT (id, extension) " - + "DO UPDATE SET json = EXCLUDED.json", + "INSERT INTO consumers_dlq(id, extension, json, source) " + + "VALUES (:id, :extension, (:json :: jsonb), :source) " + + "ON CONFLICT (id, extension) " + + "DO UPDATE SET json = EXCLUDED.json, source = EXCLUDED.source", connectionType = POSTGRES) void upsertFailedEvent( - @Bind("id") String id, @Bind("extension") String extension, @Bind("json") String json); + @Bind("id") String id, + @Bind("extension") String extension, + @Bind("json") String json, + @Bind("source") String source); } interface ChartDAO extends EntityDAO { @@ -3905,6 +3913,40 @@ default User findEntityByName(String fqn, Include include) { } interface ChangeEventDAO { + @SqlQuery( + "SELECT json FROM change_event ce where ce.offset > :offset ORDER BY ce.eventTime ASC LIMIT :limit") + List listUnprocessedEvents(@Bind("offset") long offset, @Bind("limit") long limit); + + @SqlQuery( + "SELECT json, 
source FROM consumers_dlq WHERE id = :id ORDER BY timestamp ASC LIMIT :limit") + @RegisterRowMapper(FailedEventResponseMapper.class) + List listFailedEventsById( + @Bind("id") String id, @Bind("limit") long limit); + + @SqlQuery( + "SELECT json, source FROM consumers_dlq WHERE id = :id AND source = :source ORDER BY timestamp ASC LIMIT :limit") + @RegisterRowMapper(FailedEventResponseMapper.class) + List listFailedEventsByIdAndSource( + @Bind("id") String id, @Bind("source") String source, @Bind("limit") long limit); + + @SqlQuery("SELECT json, source FROM consumers_dlq LIMIT :limit") + @RegisterRowMapper(FailedEventResponseMapper.class) + List listAllFailedEvents(@Bind("limit") long limit); + + @SqlQuery("SELECT json, source FROM consumers_dlq WHERE source = :source LIMIT :limit") + @RegisterRowMapper(FailedEventResponseMapper.class) + List listAllFailedEventsBySource( + @Bind("source") String source, @Bind("limit") long limit); + + @SqlQuery( + "SELECT json FROM change_event ce where ce.offset < :offset ORDER BY ce.eventTime ASC LIMIT :limit") + List listChangeEventsBeforeOffset( + @Bind("limit") long limit, @Bind("offset") long offset); + + @SqlQuery( + "SELECT CASE WHEN EXISTS (SELECT 1 FROM event_subscription_entity WHERE id = :id) THEN 1 ELSE 0 END AS record_exists") + int recordExists(@Bind("id") String id); + @ConnectionAwareSqlUpdate( value = "INSERT INTO change_event (json) VALUES (:json)", connectionType = MYSQL) @@ -3953,6 +3995,20 @@ List listWithoutEntityFilter( long getLatestOffset(); } + class FailedEventResponseMapper implements RowMapper { + @Override + public FailedEventResponse map(ResultSet rs, StatementContext ctx) throws SQLException { + FailedEventResponse response = new FailedEventResponse(); + FailedEvent failedEvent = JsonUtils.readValue(rs.getString("json"), FailedEvent.class); + response.setFailingSubscriptionId(failedEvent.getFailingSubscriptionId()); + response.setChangeEvent(failedEvent.getChangeEvent()); + 
response.setReason(failedEvent.getReason()); + response.setSource(rs.getString("source")); + response.setTimestamp(failedEvent.getTimestamp()); + return response; + } + } + interface TypeEntityDAO extends EntityDAO { @Override default String getTableName() { @@ -4910,6 +4966,9 @@ public static Settings getSettings(SettingsType configType, String json) { .readValue(json, String.class); case PROFILER_CONFIGURATION -> JsonUtils.readValue(json, ProfilerConfiguration.class); case SEARCH_SETTINGS -> JsonUtils.readValue(json, SearchSettings.class); + case ASSET_CERTIFICATION_SETTINGS -> JsonUtils.readValue( + json, AssetCertificationSettings.class); + case LINEAGE_SETTINGS -> JsonUtils.readValue(json, LineageSettings.class); default -> throw new IllegalArgumentException("Invalid Settings Type " + configType); }; settings.setConfigValue(value); diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DashboardServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DashboardServiceRepository.java index 8bf66d6e20c5..63811a6df5be 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DashboardServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DashboardServiceRepository.java @@ -33,5 +33,6 @@ public DashboardServiceRepository() { "", ServiceType.DASHBOARD); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseRepository.java index 98330c65b6f9..00593019b22b 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseRepository.java @@ -13,6 +13,7 @@ package org.openmetadata.service.jdbi3; +import static org.openmetadata.csv.CsvUtil.addExtension; import static 
org.openmetadata.csv.CsvUtil.addField; import static org.openmetadata.csv.CsvUtil.addGlossaryTerms; import static org.openmetadata.csv.CsvUtil.addOwners; @@ -68,6 +69,8 @@ public DatabaseRepository() { "", ""); supportsSearch = true; + parent = true; + fieldFetchers.put("name", this::fetchAndSetService); } @Override @@ -125,7 +128,8 @@ public String exportToCsv(String name, String user) throws IOException { (DatabaseSchemaRepository) Entity.getEntityRepository(DATABASE_SCHEMA); List schemas = repository.listAllForCSV( - repository.getFields("owners,tags,domain"), database.getFullyQualifiedName()); + repository.getFields("owners,tags,domain,extension"), database.getFullyQualifiedName()); + schemas.sort(Comparator.comparing(EntityInterface::getFullyQualifiedName)); return new DatabaseCsv(database, user).exportCsv(schemas); } @@ -224,6 +228,17 @@ public Database deleteDatabaseProfilerConfig(UUID databaseId) { return database; } + private void fetchAndSetService(List entities, Fields fields) { + if (entities == null || entities.isEmpty() || (!fields.contains("name"))) { + return; + } + + EntityReference service = getContainer(entities.get(0).getId()); + for (Database database : entities) { + database.setService(service); + } + } + public class DatabaseUpdater extends EntityUpdater { public DatabaseUpdater(Database original, Database updated, Operation operation) { super(original, updated, operation); @@ -282,7 +297,8 @@ protected void createEntity(CSVPrinter printer, List csvRecords) thro .withTags(tagLabels) .withRetentionPeriod(csvRecord.get(7)) .withSourceUrl(csvRecord.get(8)) - .withDomain(getEntityReference(printer, csvRecord, 9, Entity.DOMAIN)); + .withDomain(getEntityReference(printer, csvRecord, 9, Entity.DOMAIN)) + .withExtension(getExtension(printer, csvRecord, 10)); if (processRecord) { createEntity(printer, csvRecord, schema); } @@ -306,6 +322,7 @@ protected void addRecord(CsvFile csvFile, DatabaseSchema entity) { ? 
"" : entity.getDomain().getFullyQualifiedName(); addField(recordList, domain); + addExtension(recordList, entity.getExtension()); addRecord(csvFile, recordList); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseSchemaRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseSchemaRepository.java index 1117b61a3ec0..528ef99baa1f 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseSchemaRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseSchemaRepository.java @@ -14,6 +14,7 @@ package org.openmetadata.service.jdbi3; import static org.openmetadata.common.utils.CommonUtil.nullOrEmpty; +import static org.openmetadata.csv.CsvUtil.addExtension; import static org.openmetadata.csv.CsvUtil.addField; import static org.openmetadata.csv.CsvUtil.addGlossaryTerms; import static org.openmetadata.csv.CsvUtil.addOwners; @@ -73,6 +74,7 @@ public DatabaseSchemaRepository() { "", ""); supportsSearch = true; + parent = true; } @Override @@ -196,9 +198,11 @@ public void entityRelationshipReindex(DatabaseSchema original, DatabaseSchema up public String exportToCsv(String name, String user) throws IOException { DatabaseSchema schema = getByName(null, name, Fields.EMPTY_FIELDS); // Validate database schema TableRepository repository = (TableRepository) Entity.getEntityRepository(TABLE); + List tables = repository.listAllForCSV( - repository.getFields("owners,tags,domain"), schema.getFullyQualifiedName()); + repository.getFields("owners,tags,domain,extension"), schema.getFullyQualifiedName()); + tables.sort(Comparator.comparing(EntityInterface::getFullyQualifiedName)); return new DatabaseSchemaCsv(schema, user).exportCsv(tables); } @@ -315,7 +319,8 @@ protected void createEntity(CSVPrinter printer, List csvRecords) thro .withRetentionPeriod(csvRecord.get(7)) .withSourceUrl(csvRecord.get(8)) .withColumns(nullOrEmpty(table.getColumns()) ? 
new ArrayList<>() : table.getColumns()) - .withDomain(getEntityReference(printer, csvRecord, 9, Entity.DOMAIN)); + .withDomain(getEntityReference(printer, csvRecord, 9, Entity.DOMAIN)) + .withExtension(getExtension(printer, csvRecord, 10)); if (processRecord) { createEntity(printer, csvRecord, table); @@ -340,6 +345,7 @@ protected void addRecord(CsvFile csvFile, Table entity) { ? "" : entity.getDomain().getFullyQualifiedName(); addField(recordList, domain); + addExtension(recordList, entity.getExtension()); addRecord(csvFile, recordList); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseServiceRepository.java index 959ce9d8d3ab..104f9d846e2c 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DatabaseServiceRepository.java @@ -13,6 +13,7 @@ package org.openmetadata.service.jdbi3; +import static org.openmetadata.csv.CsvUtil.addExtension; import static org.openmetadata.csv.CsvUtil.addField; import static org.openmetadata.csv.CsvUtil.addGlossaryTerms; import static org.openmetadata.csv.CsvUtil.addOwners; @@ -59,6 +60,7 @@ public DatabaseServiceRepository() { "", ServiceType.DATABASE); supportsSearch = true; + parent = true; } @Override @@ -68,7 +70,9 @@ public String exportToCsv(String name, String user) throws IOException { DatabaseRepository repository = (DatabaseRepository) Entity.getEntityRepository(DATABASE); List databases = repository.listAllForCSV( - repository.getFields("owners,tags,domain"), databaseService.getFullyQualifiedName()); + repository.getFields("name,owners,tags,domain,extension"), + databaseService.getFullyQualifiedName()); + databases.sort(Comparator.comparing(EntityInterface::getFullyQualifiedName)); return new DatabaseServiceCsv(databaseService, user).exportCsv(databases); } @@ 
-122,7 +126,8 @@ protected void createEntity(CSVPrinter printer, List csvRecords) thro .withDescription(csvRecord.get(2)) .withOwners(getOwners(printer, csvRecord, 3)) .withTags(tagLabels) - .withDomain(getEntityReference(printer, csvRecord, 7, Entity.DOMAIN)); + .withDomain(getEntityReference(printer, csvRecord, 7, Entity.DOMAIN)) + .withExtension(getExtension(printer, csvRecord, 8)); if (processRecord) { createEntity(printer, csvRecord, database); @@ -145,6 +150,7 @@ protected void addRecord(CsvFile csvFile, Database entity) { ? "" : entity.getDomain().getFullyQualifiedName(); addField(recordList, domain); + addExtension(recordList, entity.getExtension()); addRecord(csvFile, recordList); } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DomainRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DomainRepository.java index a7e6e54efcf8..707c9f3f97a9 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DomainRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/DomainRepository.java @@ -46,6 +46,7 @@ public DomainRepository() { UPDATE_FIELDS, UPDATE_FIELDS); supportsSearch = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/EntityRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/EntityRepository.java index 4f6d9540a7a9..e71a280314b3 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/EntityRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/EntityRepository.java @@ -29,6 +29,7 @@ import static org.openmetadata.service.Entity.ADMIN_USER_NAME; import static org.openmetadata.service.Entity.DATA_PRODUCT; import static org.openmetadata.service.Entity.DOMAIN; +import static org.openmetadata.service.Entity.FIELD_CERTIFICATION; import static org.openmetadata.service.Entity.FIELD_CHILDREN; 
import static org.openmetadata.service.Entity.FIELD_DATA_PRODUCTS; import static org.openmetadata.service.Entity.FIELD_DELETED; @@ -78,8 +79,11 @@ import com.networknt.schema.ValidationMessage; import java.io.IOException; import java.net.URI; +import java.time.Instant; import java.time.LocalDateTime; import java.time.LocalTime; +import java.time.Period; +import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeParseException; import java.time.temporal.TemporalAccessor; @@ -121,12 +125,14 @@ import org.openmetadata.schema.api.VoteRequest.VoteType; import org.openmetadata.schema.api.feed.ResolveTask; import org.openmetadata.schema.api.teams.CreateTeam; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.entity.data.Table; import org.openmetadata.schema.entity.feed.Suggestion; import org.openmetadata.schema.entity.teams.Team; import org.openmetadata.schema.entity.teams.User; import org.openmetadata.schema.system.EntityError; import org.openmetadata.schema.type.ApiStatus; +import org.openmetadata.schema.type.AssetCertification; import org.openmetadata.schema.type.ChangeDescription; import org.openmetadata.schema.type.ChangeEvent; import org.openmetadata.schema.type.Column; @@ -235,6 +241,7 @@ public record EntityHistoryWithOffset(EntityHistory entityHistory, int nextOffse @Getter protected final boolean supportsOwners; @Getter protected final boolean supportsStyle; @Getter protected final boolean supportsLifeCycle; + @Getter protected final boolean supportsCertification; protected final boolean supportsFollower; protected final boolean supportsExtension; protected final boolean supportsVotes; @@ -253,6 +260,7 @@ public record EntityHistoryWithOffset(EntityHistory entityHistory, int nextOffse @Getter protected final Fields putFields; protected boolean supportsSearch = false; + @Getter protected boolean parent = false; protected final Map, Fields>> fieldFetchers = new 
HashMap<>(); protected EntityRepository( @@ -328,6 +336,11 @@ protected EntityRepository( this.patchFields.addField(allowedFields, FIELD_LIFE_CYCLE); this.putFields.addField(allowedFields, FIELD_LIFE_CYCLE); } + this.supportsCertification = allowedFields.contains(FIELD_CERTIFICATION); + if (supportsCertification) { + this.patchFields.addField(allowedFields, FIELD_CERTIFICATION); + this.putFields.addField(allowedFields, FIELD_CERTIFICATION); + } Map, Fields>>> fieldSupportMap = new HashMap<>(); @@ -2627,6 +2640,7 @@ private void updateInternal() { updateReviewers(); updateStyle(); updateLifeCycle(); + updateCertification(); entitySpecificUpdate(); } } @@ -2929,6 +2943,53 @@ private void updateLifeCycle() { recordChange(FIELD_LIFE_CYCLE, origLifeCycle, updatedLifeCycle, true); } + private void updateCertification() { + if (!supportsCertification) { + return; + } + AssetCertification origCertification = original.getCertification(); + AssetCertification updatedCertification = updated.getCertification(); + + if (origCertification == updatedCertification || updatedCertification == null) return; + + SystemRepository systemRepository = Entity.getSystemRepository(); + AssetCertificationSettings assetCertificationSettings = + systemRepository.getAssetCertificationSettings(); + + String certificationLabel = updatedCertification.getTagLabel().getTagFQN(); + + validateCertification(certificationLabel, assetCertificationSettings); + + long certificationDate = System.currentTimeMillis(); + updatedCertification.setAppliedDate(certificationDate); + + LocalDateTime nowDateTime = + LocalDateTime.ofInstant(Instant.ofEpochMilli(certificationDate), ZoneOffset.UTC); + Period datePeriod = Period.parse(assetCertificationSettings.getValidityPeriod()); + LocalDateTime targetDateTime = nowDateTime.plus(datePeriod); + updatedCertification.setExpiryDate(targetDateTime.toInstant(ZoneOffset.UTC).toEpochMilli()); + + recordChange(FIELD_CERTIFICATION, origCertification, updatedCertification, true); 
+ } + + private void validateCertification( + String certificationLabel, AssetCertificationSettings assetCertificationSettings) { + if (Optional.ofNullable(assetCertificationSettings).isEmpty()) { + throw new IllegalArgumentException( + "Certification is not configured. Please configure the Classification used for Certification in the Settings."); + } else { + String allowedClassification = assetCertificationSettings.getAllowedClassification(); + String[] fqnParts = FullyQualifiedName.split(certificationLabel); + String parentFqn = FullyQualifiedName.getParentFQN(fqnParts); + if (!allowedClassification.equals(parentFqn)) { + throw new IllegalArgumentException( + String.format( + "Invalid Classification: %s is not valid for Certification.", + certificationLabel)); + } + } + } + public final boolean updateVersion(Double oldVersion) { Double newVersion = oldVersion; if (majorVersionChange) { @@ -3216,7 +3277,9 @@ public static void setSessionTimeout(long timeout) { private boolean consolidateChanges(T original, T updated, Operation operation) { // If user is the same and the new update is with in the user session timeout - return original.getVersion() > 0.1 // First update on an entity that + return !parent // Parent entity shouldn't consolidate changes, as we need ChangeDescription to + // propagate to children + && original.getVersion() > 0.1 // First update on an entity that && operation == Operation.PATCH && !Boolean.TRUE.equals(original.getDeleted()) // Entity is not soft deleted && !operation.isDelete() // Operation must be an update @@ -3225,6 +3288,7 @@ private boolean consolidateChanges(T original, T updated, Operation operation) { .equals(updated.getUpdatedBy()) // Must be updated by the same user && updated.getUpdatedAt() - original.getUpdatedAt() <= sessionTimeoutMillis; // With in session timeout + // changes to children } private T getPreviousVersion(T original) { diff --git 
a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/GlossaryRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/GlossaryRepository.java index 675bbacc4677..3701ef0a7ae4 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/GlossaryRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/GlossaryRepository.java @@ -89,6 +89,7 @@ public GlossaryRepository() { quoteFqn = true; supportsSearch = true; renameAllowed = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MessagingServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MessagingServiceRepository.java index 9f3554b92c18..2d757f9ac87e 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MessagingServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MessagingServiceRepository.java @@ -34,5 +34,6 @@ public MessagingServiceRepository() { UPDATE_FIELDS, ServiceType.MESSAGING); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MetadataServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MetadataServiceRepository.java index 5f9094143e55..d0cc53ba9231 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MetadataServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MetadataServiceRepository.java @@ -19,5 +19,6 @@ public MetadataServiceRepository() { UPDATE_FIELDS, ServiceType.METADATA); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MlModelServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MlModelServiceRepository.java index ecd8d17d09f0..0d15888b0a5d 100644 --- 
a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MlModelServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/MlModelServiceRepository.java @@ -34,5 +34,6 @@ public MlModelServiceRepository() { UPDATE_FIELDS, ServiceType.ML_MODEL); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/PipelineServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/PipelineServiceRepository.java index e1281c96835d..bcadae3b80ea 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/PipelineServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/PipelineServiceRepository.java @@ -33,5 +33,6 @@ public PipelineServiceRepository() { "", ServiceType.PIPELINE); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SearchServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SearchServiceRepository.java index b3acc9d2a95e..4cd0f5bb09ba 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SearchServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SearchServiceRepository.java @@ -17,5 +17,6 @@ public SearchServiceRepository() { "", ServiceType.SEARCH); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/StorageServiceRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/StorageServiceRepository.java index 887d5e6e5954..b101cbc193c8 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/StorageServiceRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/StorageServiceRepository.java @@ -17,5 +17,6 @@ public StorageServiceRepository() { "", 
ServiceType.STORAGE); supportsSearch = true; + parent = true; } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SystemRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SystemRepository.java index 9f710e6ef651..867b450f58ad 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SystemRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/SystemRepository.java @@ -9,6 +9,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Optional; import javax.json.JsonPatch; import javax.json.JsonValue; import javax.ws.rs.core.Response; @@ -16,6 +17,7 @@ import lombok.extern.slf4j.Slf4j; import org.jdbi.v3.sqlobject.transaction.Transaction; import org.openmetadata.api.configuration.UiThemePreference; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.email.SmtpSettings; import org.openmetadata.schema.entity.services.ingestionPipelines.PipelineServiceClientResponse; import org.openmetadata.schema.security.client.OpenMetadataJWTClientConfig; @@ -110,6 +112,15 @@ public Settings getConfigWithKey(String key) { return null; } + public AssetCertificationSettings getAssetCertificationSettings() { + Optional oAssetCertificationSettings = + Optional.ofNullable(getConfigWithKey(SettingsType.ASSET_CERTIFICATION_SETTINGS.value())); + + return oAssetCertificationSettings + .map(settings -> (AssetCertificationSettings) settings.getConfigValue()) + .orElse(null); + } + public Settings getEmailConfigInternal() { try { Settings setting = dao.getConfigWithKey(SettingsType.EMAIL_CONFIGURATION.value()); diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TeamRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TeamRepository.java index ee6b38569150..1c169cc8fd3b 100644 --- 
a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TeamRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TeamRepository.java @@ -102,6 +102,7 @@ public TeamRepository() { TEAM_UPDATE_FIELDS); this.quoteFqn = true; supportsSearch = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TestSuiteRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TestSuiteRepository.java index a582ea559982..84550114bcd8 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TestSuiteRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/jdbi3/TestSuiteRepository.java @@ -104,6 +104,7 @@ public TestSuiteRepository() { UPDATE_FIELDS); quoteFqn = false; supportsSearch = true; + parent = true; } @Override diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/resources/events/subscription/EventSubscriptionResource.java b/openmetadata-service/src/main/java/org/openmetadata/service/resources/events/subscription/EventSubscriptionResource.java index ca56276591ff..48760fa9d984 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/resources/events/subscription/EventSubscriptionResource.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/resources/events/subscription/EventSubscriptionResource.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -51,6 +52,7 @@ import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; @@ -60,8 +62,10 @@ import org.openmetadata.common.utils.CommonUtil; import 
org.openmetadata.schema.api.events.CreateEventSubscription; import org.openmetadata.schema.api.events.EventSubscriptionDestinationTestRequest; +import org.openmetadata.schema.api.events.EventSubscriptionDiagnosticInfo; import org.openmetadata.schema.entity.events.EventFilterRule; import org.openmetadata.schema.entity.events.EventSubscription; +import org.openmetadata.schema.entity.events.FailedEventResponse; import org.openmetadata.schema.entity.events.SubscriptionDestination; import org.openmetadata.schema.entity.events.SubscriptionStatus; import org.openmetadata.schema.type.ChangeEvent; @@ -77,6 +81,7 @@ import org.openmetadata.service.events.scheduled.EventSubscriptionScheduler; import org.openmetadata.service.events.subscription.AlertUtil; import org.openmetadata.service.events.subscription.EventsSubscriptionRegistry; +import org.openmetadata.service.exception.EntityNotFoundException; import org.openmetadata.service.jdbi3.CollectionDAO; import org.openmetadata.service.jdbi3.EventSubscriptionRepository; import org.openmetadata.service.jdbi3.ListFilter; @@ -626,6 +631,547 @@ public void validateCondition( AlertUtil.validateExpression(expression, Boolean.class); } + @GET + @Path("id/{id}/listEvents") + @Operation( + operationId = "getEvents", + summary = "Retrieve events based on various filters", + description = + "Retrieve failed, successfully sent, or unprocessed change events, identified by alert ID, with an optional limit. 
If status is not provided, retrieves data from all statuses in ascending timestamp.", + responses = { + @ApiResponse(responseCode = "200", description = "Events retrieved successfully"), + @ApiResponse(responseCode = "404", description = "Entity not found"), + @ApiResponse(responseCode = "400", description = "Invalid request parameters"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getEvents( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter( + description = "Status of events to retrieve (failed, successful, unprocessed)", + schema = + @Schema( + type = "string", + allowableValues = {"failed", "successful", "unprocessed"})) + @QueryParam("status") + String statusParam, + @Parameter(description = "ID of the alert or destination", schema = @Schema(type = "UUID")) + @PathParam("id") + UUID id, + @Parameter( + description = "Maximum number of events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit) { + + authorizer.authorizeAdmin(securityContext); + + try { + List combinedEvents = new ArrayList<>(); + TypedEvent.Status status = null; + + if (statusParam != null && !statusParam.isBlank()) { + try { + status = TypedEvent.Status.fromValue(statusParam); + } catch (IllegalArgumentException e) { + throw new WebApplicationException( + "Invalid status. Must be 'failed', 'successful', or 'unprocessed'.", + Response.Status.BAD_REQUEST); + } + } + + if (status == null) { + combinedEvents.addAll(fetchEvents(TypedEvent.Status.FAILED, id, limit)); + combinedEvents.addAll(fetchEvents(TypedEvent.Status.SUCCESSFUL, id, limit)); + combinedEvents.addAll(fetchEvents(TypedEvent.Status.UNPROCESSED, id, limit)); + // Sort combined events by timestamp in ascending order. 
+ combinedEvents.sort(Comparator.comparing(TypedEvent::getTimestamp)); + } else { + combinedEvents.addAll(fetchEvents(status, id, limit)); + } + + return Response.ok().entity(combinedEvents).build(); + } catch (EntityNotFoundException e) { + LOG.error("Entity not found for ID: {}", id, e); + return Response.status(Response.Status.NOT_FOUND) + .entity(String.format("Entity with ID %s not found.", id)) + .build(); + } catch (Exception e) { + LOG.error("Error retrieving events for ID: {}", id, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity(String.format("An error occurred while retrieving events. [%s]", e.getMessage())) + .build(); + } + } + + private List fetchEvents(TypedEvent.Status status, UUID id, int limit) { + List events; + switch (status) { + case FAILED -> events = + EventSubscriptionScheduler.getInstance().getFailedEventsById(id, limit); + case SUCCESSFUL -> events = + EventSubscriptionScheduler.getInstance() + .getSuccessfullySentChangeEventsForAlert(id, limit); + case UNPROCESSED -> events = + EventSubscriptionScheduler.getInstance().getUnpublishedEvents(id, limit); + default -> throw new IllegalArgumentException("Unknown event status: " + status); + } + + return events.stream() + .map( + event -> + new TypedEvent() + .withStatus(status) + .withData(List.of(event)) + .withTimestamp(Double.valueOf(extractTimestamp(event)))) + .toList(); + } + + private Long extractTimestamp(Object event) { + if (event instanceof ChangeEvent changeEvent) { + return changeEvent.getTimestamp(); + } else if (event instanceof FailedEventResponse failedEvent) { + return failedEvent.getChangeEvent().getTimestamp(); + } + throw new IllegalArgumentException("Unknown event type: " + event.getClass()); + } + + @GET + @Path("/id/{subscriptionId}/diagnosticInfo") + @Operation( + operationId = "getEventSubscriptionDiagnosticInfoById", + summary = "Get event subscription diagnostic info", + description = + "Retrieve diagnostic information for a given event 
subscription ID, including current and latest offsets, unprocessed events count, and more.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Event subscription diagnostic info retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = EventSubscriptionDiagnosticInfo.class))), + @ApiResponse(responseCode = "404", description = "Event subscription not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getEventSubscriptionDiagnosticInfoById( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "Maximum number of unprocessed events returned") + @DefaultValue("100") + @Min(0) + @QueryParam("limit") + int limit, + @Parameter(description = "UUID of the Event Subscription", schema = @Schema(type = "UUID")) + @PathParam("subscriptionId") + UUID subscriptionId) { + authorizer.authorizeAdmin(securityContext); + try { + if (!EventSubscriptionScheduler.getInstance().doesRecordExist(subscriptionId)) { + return Response.status(Response.Status.NOT_FOUND) + .entity("Event subscription not found for ID: " + subscriptionId) + .build(); + } + + EventSubscriptionDiagnosticInfo diagnosticInfo = + EventSubscriptionScheduler.getInstance() + .getEventSubscriptionDiagnosticInfo(subscriptionId, limit); + + return Response.ok().entity(diagnosticInfo).build(); + } catch (Exception e) { + LOG.error("Error retrieving diagnostic info for subscription ID: {}", subscriptionId, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity( + "An error occurred while retrieving diagnostic info for subscription ID: " + + subscriptionId) + .build(); + } + } + + @GET + @Path("/name/{subscriptionName}/diagnosticInfo") + @Operation( + operationId = "getEventSubscriptionDiagnosticInfoByName", + summary = "Get event subscription diagnostic info by name", + description = + "Retrieve diagnostic information 
for a given event subscription name, including current and latest offsets, unprocessed events count, and more.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Event subscription diagnostic info retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = EventSubscriptionDiagnosticInfo.class))), + @ApiResponse(responseCode = "404", description = "Event subscription not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getEventSubscriptionDiagnosticInfoByName( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "Maximum number of unprocessed events returned") + @DefaultValue("100") + @Min(0) + @QueryParam("limit") + int limit, + @Parameter(description = "Name of the Event Subscription", schema = @Schema(type = "string")) + @PathParam("subscriptionName") + String subscriptionName) { + authorizer.authorizeAdmin(securityContext); + try { + EventSubscription subscription = + repository.getByName(null, subscriptionName, repository.getFields("id")); + + if (subscription == null) { + return Response.status(Response.Status.NOT_FOUND) + .entity("Event subscription not found for name: " + subscriptionName) + .build(); + } + + EventSubscriptionDiagnosticInfo diagnosticInfo = + EventSubscriptionScheduler.getInstance() + .getEventSubscriptionDiagnosticInfo(subscription.getId(), limit); + + return Response.ok().entity(diagnosticInfo).build(); + } catch (Exception e) { + LOG.error("Error retrieving diagnostic info for subscription name: {}", subscriptionName, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity( + "An error occurred while retrieving diagnostic info for subscription name: " + + subscriptionName) + .build(); + } + } + + @GET + @Path("/id/{id}/failedEvents") + @Operation( + operationId = "getFailedEventsBySubscriptionId", + summary = "Get failed events for 
a subscription by id", + description = "Retrieve failed events for a given subscription id.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Failed events retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = ChangeEvent.class))), + @ApiResponse(responseCode = "404", description = "Event subscription not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getFailedEvents( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "Id of the Event Subscription", schema = @Schema(type = "UUID")) + @PathParam("id") + UUID id, + @Parameter( + description = "Maximum number of failed events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit, + @Parameter(description = "Source of the failed events", schema = @Schema(type = "string")) + @QueryParam("source") + String source) { + authorizer.authorizeAdmin(securityContext); + + try { + List failedEvents = + EventSubscriptionScheduler.getInstance().getFailedEventsByIdAndSource(id, source, limit); + + return Response.ok().entity(failedEvents).build(); + } catch (Exception e) { + LOG.error("Error retrieving failed events for subscription ID: {}", id, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity("An error occurred while retrieving failed events for subscription ID: " + id) + .build(); + } + } + + @GET + @Path("/name/{eventSubscriptionName}/failedEvents") + @Operation( + operationId = "getFailedEventsBySubscriptionName", + summary = "Get failed events for a subscription by name", + description = "Retrieve failed events for a given subscription name.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Failed events retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = 
@Schema(implementation = ChangeEvent.class))), + @ApiResponse(responseCode = "404", description = "Event subscription not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getFailedEventsByEventSubscriptionName( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "Name of the Event Subscription", schema = @Schema(type = "string")) + @PathParam("eventSubscriptionName") + String name, + @Parameter( + description = "Maximum number of failed events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit, + @Parameter(description = "Source of the failed events", schema = @Schema(type = "string")) + @QueryParam("source") + String source) { + authorizer.authorizeAdmin(securityContext); + + try { + EventSubscription subscription = repository.getByName(null, name, repository.getFields("id")); + + List failedEvents = + EventSubscriptionScheduler.getInstance() + .getFailedEventsByIdAndSource(subscription.getId(), source, limit); + + return Response.ok().entity(failedEvents).build(); + + } catch (EntityNotFoundException ex) { + return Response.status(Response.Status.NOT_FOUND) + .entity("Event subscription not found for name: " + name) + .build(); + + } catch (Exception e) { + LOG.error("Error retrieving failed events for subscription Name: {}", name, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity("An error occurred while retrieving failed events for subscription name: " + name) + .build(); + } + } + + @GET + @Path("/listAllFailedEvents") + @Operation( + operationId = "getAllFailedEvents", + summary = "Get all failed events", + description = "Retrieve all failed events, optionally filtered by source, and apply a limit.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Failed events retrieved successfully", + content = + @Content( + mediaType = 
"application/json", + schema = @Schema(implementation = ChangeEvent.class))), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getAllFailedEvents( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter( + description = "Maximum number of failed events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit, + @Parameter(description = "Source of the failed events", schema = @Schema(type = "string")) + @QueryParam("source") + String source) { + authorizer.authorizeAdmin(securityContext); + + try { + List failedEvents = + EventSubscriptionScheduler.getInstance().getAllFailedEvents(source, limit); + + return Response.ok().entity(failedEvents).build(); + } catch (Exception e) { + LOG.error("Error retrieving all failed events", e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity("An error occurred while retrieving all failed events." 
+ e.getMessage()) + .build(); + } + } + + @GET + @Path("id/{id}/listSuccessfullySentChangeEvents") + @Operation( + operationId = "getSuccessfullySentChangeEventsForAlert", + summary = "Get successfully sent change events for an alert", + description = + "Retrieve successfully sent change events for a specific alert, identified by its ID, with an optional limit.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Successfully sent change events retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = ChangeEvent.class))), + @ApiResponse(responseCode = "404", description = "Alert not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getSuccessfullySentChangeEventsForAlert( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter( + description = "ID of the alert to retrieve change events for", + schema = @Schema(type = "UUID")) + @PathParam("id") + UUID id, + @Parameter( + description = "Maximum number of change events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit) { + + authorizer.authorizeAdmin(securityContext); + + try { + List changeEvents = + EventSubscriptionScheduler.getInstance() + .getSuccessfullySentChangeEventsForAlert(id, limit); + + return Response.ok().entity(changeEvents).build(); + } catch (EntityNotFoundException e) { + LOG.error("Alert not found: {}", id, e); + return Response.status(Response.Status.NOT_FOUND) + .entity(String.format("Alert with ID %s not found.", id)) + .build(); + } catch (Exception e) { + LOG.error("Error retrieving successfully sent change events for alert: {}", id, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity( + String.format( + "An error occurred while retrieving successfully sent change events for alert: %s. 
[%s]", + id, e.getMessage())) + .build(); + } + } + + @GET + @Path("name/{eventSubscriptionName}/listSuccessfullySentChangeEvents") + @Operation( + operationId = "getSuccessfullySentChangeEventsForAlertByName", + summary = "Get successfully sent change events for an alert by name", + description = + "Retrieve successfully sent change events for a specific alert, identified by its name, with an optional limit.", + responses = { + @ApiResponse( + responseCode = "200", + description = "Successfully sent change events retrieved successfully", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = ChangeEvent.class))), + @ApiResponse(responseCode = "404", description = "Alert not found"), + @ApiResponse(responseCode = "500", description = "Internal server error") + }) + public Response getSuccessfullySentChangeEventsForAlertByName( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter( + description = "Name of the alert to retrieve change events for", + schema = @Schema(type = "string")) + @PathParam("eventSubscriptionName") + String name, + @Parameter( + description = "Maximum number of change events to retrieve", + schema = @Schema(type = "integer")) + @QueryParam("limit") + @DefaultValue("100") + @Min(0) + int limit) { + authorizer.authorizeAdmin(securityContext); + + try { + EventSubscription subscription = repository.getByName(null, name, repository.getFields("id")); + + List changeEvents = + EventSubscriptionScheduler.getInstance() + .getSuccessfullySentChangeEventsForAlert(subscription.getId(), limit); + + return Response.ok().entity(changeEvents).build(); + } catch (EntityNotFoundException e) { + LOG.error("Alert not found with name: {}", name, e); + return Response.status(Response.Status.NOT_FOUND) + .entity(String.format("Alert with name '%s' not found.", name)) + .build(); + } catch (Exception e) { + LOG.error( + "Error retrieving successfully sent change events for alert with name: {}", 
name, e); + return Response.status(Response.Status.INTERNAL_SERVER_ERROR) + .entity( + String.format( + "An error occurred while retrieving successfully sent change events for alert with name: %s. [%s]", + name, e.getMessage())) + .build(); + } + } + + @GET + @Path("/id/{eventSubscriptionId}/destinations") + @Valid + @Operation( + operationId = "getAllDestinationForEventSubscription", + summary = "Get the destinations for a specific Event Subscription", + description = + "Retrieve the status of all destinations associated with the given Event Subscription ID", + responses = { + @ApiResponse( + responseCode = "200", + description = "Returns the destinations for the Event Subscription", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = SubscriptionDestination.class))), + @ApiResponse( + responseCode = "404", + description = "Event Subscription for instance {eventSubscriptionId} is not found") + }) + public List getAllDestinationForSubscriptionById( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "ID of the Event Subscription", schema = @Schema(type = "UUID")) + @PathParam("eventSubscriptionId") + UUID id) { + return EventSubscriptionScheduler.getInstance().listAlertDestinations(id); + } + + @GET + @Path("name/{eventSubscriptionName}/destinations") + @Valid + @Operation( + operationId = "getAllDestinationForEventSubscriptionByName", + summary = "Get the destinations for a specific Event Subscription by its name", + description = + "Retrieve the status of all destinations associated with the given Event Subscription's fully qualified name (FQN)", + responses = { + @ApiResponse( + responseCode = "200", + description = "Returns the destinations for the Event Subscription", + content = + @Content( + mediaType = "application/json", + schema = @Schema(implementation = SubscriptionDestination.class))), + @ApiResponse( + responseCode = "404", + description = "Event Subscription with 
the name {fqn} is not found") + }) + public List getAllDestinationStatusesForSubscriptionByName( + @Context UriInfo uriInfo, + @Context SecurityContext securityContext, + @Parameter(description = "Name of the Event Subscription", schema = @Schema(type = "string")) + @PathParam("eventSubscriptionName") + String name) { + authorizer.authorizeAdmin(securityContext); + EventSubscription sub = repository.getByName(null, name, repository.getFields("id")); + return EventSubscriptionScheduler.getInstance().listAlertDestinations(sub.getId()); + } + @POST @Path("/testDestination") @Operation( @@ -642,9 +1188,13 @@ public Response sendTestMessageAlert( @Context UriInfo uriInfo, @Context SecurityContext securityContext, EventSubscriptionDestinationTestRequest request) { + authorizer.authorizeAdmin(securityContext); + EventSubscription eventSubscription = new EventSubscription().withFullyQualifiedName(request.getAlertName()); + List resultDestinations = new ArrayList<>(); + // by-pass AbstractEventConsumer - covers external destinations as of now request .getDestinations() @@ -654,12 +1204,13 @@ public Response sendTestMessageAlert( AlertFactory.getAlert(eventSubscription, destination); try { alert.sendTestMessage(); + resultDestinations.add(destination); } catch (EventPublisherException e) { LOG.error(e.getMessage()); } }); - return Response.ok().build(); + return Response.ok(resultDestinations).build(); } private EventSubscription getEventSubscription(CreateEventSubscription create, String user) { diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/resources/settings/SettingsCache.java b/openmetadata-service/src/main/java/org/openmetadata/service/resources/settings/SettingsCache.java index f685c73b143e..a6785ddb4616 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/resources/settings/SettingsCache.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/resources/settings/SettingsCache.java @@ -13,8 +13,10 @@ package 
org.openmetadata.service.resources.settings; +import static org.openmetadata.schema.settings.SettingsType.ASSET_CERTIFICATION_SETTINGS; import static org.openmetadata.schema.settings.SettingsType.CUSTOM_UI_THEME_PREFERENCE; import static org.openmetadata.schema.settings.SettingsType.EMAIL_CONFIGURATION; +import static org.openmetadata.schema.settings.SettingsType.LINEAGE_SETTINGS; import static org.openmetadata.schema.settings.SettingsType.LOGIN_CONFIGURATION; import static org.openmetadata.schema.settings.SettingsType.SEARCH_SETTINGS; @@ -29,7 +31,10 @@ import org.openmetadata.api.configuration.ThemeConfiguration; import org.openmetadata.api.configuration.UiThemePreference; import org.openmetadata.schema.api.configuration.LoginConfiguration; -import org.openmetadata.schema.api.searcg.SearchSettings; +import org.openmetadata.schema.api.lineage.LineageLayer; +import org.openmetadata.schema.api.lineage.LineageSettings; +import org.openmetadata.schema.api.search.SearchSettings; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.email.SmtpSettings; import org.openmetadata.schema.settings.Settings; import org.openmetadata.schema.settings.SettingsType; @@ -124,6 +129,34 @@ private static void createDefaultConfiguration(OpenMetadataApplicationConfig app .withConfigValue(new SearchSettings().withEnableAccessControl(false)); systemRepository.createNewSetting(setting); } + + // Initialise Certification Settings + Settings certificationSettings = + systemRepository.getConfigWithKey(ASSET_CERTIFICATION_SETTINGS.toString()); + if (certificationSettings == null) { + Settings setting = + new Settings() + .withConfigType(ASSET_CERTIFICATION_SETTINGS) + .withConfigValue( + new AssetCertificationSettings() + .withAllowedClassification("Certification") + .withValidityPeriod("P30D")); + systemRepository.createNewSetting(setting); + } + + Settings lineageSettings = systemRepository.getConfigWithKey(LINEAGE_SETTINGS.toString()); + if 
(lineageSettings == null) { + // Only in case a config doesn't exist in DB we insert it + Settings setting = + new Settings() + .withConfigType(LINEAGE_SETTINGS) + .withConfigValue( + new LineageSettings() + .withDownstreamDepth(2) + .withUpstreamDepth(2) + .withLineageLayer(LineageLayer.ENTITY_LINEAGE)); + systemRepository.createNewSetting(setting); + } } public static T getSetting(SettingsType settingName, Class clazz) { diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/EntityBuilderConstant.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/EntityBuilderConstant.java index 5815f984d7cf..3b800b9cc5e4 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/search/EntityBuilderConstant.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/EntityBuilderConstant.java @@ -28,6 +28,7 @@ public class EntityBuilderConstant { public static final String POST_TAG = ""; public static final Integer MAX_AGGREGATE_SIZE = 10000; public static final Integer MAX_RESULT_HITS = 10000; + public static final Integer MAX_ANALYZED_OFFSET = 1000; public static final String QUERY = "query"; public static final String QUERY_NGRAM = "query.ngram"; diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchClient.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchClient.java index 7971e837d340..b5b7a10a7ca7 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchClient.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchClient.java @@ -15,7 +15,7 @@ import javax.ws.rs.core.Response; import lombok.Getter; import org.apache.commons.lang3.tuple.Pair; -import org.openmetadata.schema.api.searcg.SearchSettings; +import org.openmetadata.schema.api.search.SearchSettings; import org.openmetadata.schema.dataInsight.DataInsightChartResult; import 
org.openmetadata.schema.dataInsight.custom.DataInsightCustomChart; import org.openmetadata.schema.dataInsight.custom.DataInsightCustomChartResultList; diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchRepository.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchRepository.java index 1a39cdc71b6d..5cb80c1e85ee 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchRepository.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/SearchRepository.java @@ -449,7 +449,7 @@ private Pair> getInheritedFieldChanges( Map fieldData = new HashMap<>(); if (changeDescription != null) { - for (FieldChange field : changeDescription.getFieldsAdded()) { + for (FieldChange field : changeDescription.getFieldsDeleted()) { if (inheritableFields.contains(field.getName())) { try { if (field.getName().equals(FIELD_OWNERS)) { @@ -458,24 +458,21 @@ private Pair> getInheritedFieldChanges( for (EntityReference inheritedOwner : inheritedOwners) { inheritedOwner.setInherited(true); } - fieldData.put("updatedOwners", inheritedOwners); - scriptTxt.append(ADD_OWNERS_SCRIPT); + fieldData.put("deletedOwners", inheritedOwners); + scriptTxt.append(REMOVE_OWNERS_SCRIPT); } else { EntityReference entityReference = - JsonUtils.readValue(field.getNewValue().toString(), EntityReference.class); + JsonUtils.readValue(field.getOldValue().toString(), EntityReference.class); scriptTxt.append( String.format( - PROPAGATE_ENTITY_REFERENCE_FIELD_SCRIPT, - field.getName(), - field.getName(), + REMOVE_PROPAGATED_ENTITY_REFERENCE_FIELD_SCRIPT, field.getName(), field.getName(), field.getName())); - fieldData.put(field.getName(), entityReference); + fieldData.put(field.getName(), JsonUtils.getMap(entityReference)); } } catch (UnhandledServerException e) { - scriptTxt.append( - String.format(PROPAGATE_FIELD_SCRIPT, field.getName(), field.getNewValue())); + 
scriptTxt.append(String.format(REMOVE_PROPAGATED_FIELD_SCRIPT, field.getName())); } } } @@ -507,7 +504,7 @@ private Pair> getInheritedFieldChanges( } } } - for (FieldChange field : changeDescription.getFieldsDeleted()) { + for (FieldChange field : changeDescription.getFieldsAdded()) { if (inheritableFields.contains(field.getName())) { try { if (field.getName().equals(FIELD_OWNERS)) { @@ -516,21 +513,24 @@ private Pair> getInheritedFieldChanges( for (EntityReference inheritedOwner : inheritedOwners) { inheritedOwner.setInherited(true); } - fieldData.put("deletedOwners", inheritedOwners); - scriptTxt.append(REMOVE_OWNERS_SCRIPT); + fieldData.put("updatedOwners", inheritedOwners); + scriptTxt.append(ADD_OWNERS_SCRIPT); } else { EntityReference entityReference = - JsonUtils.readValue(field.getOldValue().toString(), EntityReference.class); + JsonUtils.readValue(field.getNewValue().toString(), EntityReference.class); scriptTxt.append( String.format( - REMOVE_PROPAGATED_ENTITY_REFERENCE_FIELD_SCRIPT, + PROPAGATE_ENTITY_REFERENCE_FIELD_SCRIPT, + field.getName(), + field.getName(), field.getName(), field.getName(), field.getName())); - fieldData.put(field.getName(), JsonUtils.getMap(entityReference)); + fieldData.put(field.getName(), entityReference); } } catch (UnhandledServerException e) { - scriptTxt.append(String.format(REMOVE_PROPAGATED_FIELD_SCRIPT, field.getName())); + scriptTxt.append( + String.format(PROPAGATE_FIELD_SCRIPT, field.getName(), field.getNewValue())); } } } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/elasticsearch/ElasticSearchClient.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/elasticsearch/ElasticSearchClient.java index 59f13145be8d..e4fb3fe3c683 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/search/elasticsearch/ElasticSearchClient.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/elasticsearch/ElasticSearchClient.java @@ -25,6 +25,7 
@@ import static org.openmetadata.service.search.EntityBuilderConstant.FIELD_DISPLAY_NAME_NGRAM; import static org.openmetadata.service.search.EntityBuilderConstant.FIELD_NAME_NGRAM; import static org.openmetadata.service.search.EntityBuilderConstant.MAX_AGGREGATE_SIZE; +import static org.openmetadata.service.search.EntityBuilderConstant.MAX_ANALYZED_OFFSET; import static org.openmetadata.service.search.EntityBuilderConstant.MAX_RESULT_HITS; import static org.openmetadata.service.search.EntityBuilderConstant.OWNER_DISPLAY_NAME_KEYWORD; import static org.openmetadata.service.search.EntityBuilderConstant.POST_TAG; @@ -1442,6 +1443,7 @@ private static HighlightBuilder buildHighlights(List fields) { } hb.preTags(PRE_TAG); hb.postTags(POST_TAG); + hb.maxAnalyzedOffset(MAX_ANALYZED_OFFSET); return hb; } diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/indexes/TestCaseResolutionStatusIndex.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/indexes/TestCaseResolutionStatusIndex.java index 66dbf2c26d6a..4f9a512bd0c3 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/search/indexes/TestCaseResolutionStatusIndex.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/indexes/TestCaseResolutionStatusIndex.java @@ -59,6 +59,7 @@ private void setParentRelationships(Map doc) { .withDeleted(testCase.getDeleted()) .withDomain(testCase.getDomain()) .withTags(testCase.getTags()) + .withEntityFQN(testCase.getEntityFQN()) .withOwners(testCase.getOwners()); doc.put("testCase", testCase); TestSuite testSuite = Entity.getEntityOrNull(testCase.getTestSuite(), "", Include.ALL); diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/search/opensearch/OpenSearchClient.java b/openmetadata-service/src/main/java/org/openmetadata/service/search/opensearch/OpenSearchClient.java index 537e47c01e2f..bac465f2902f 100644 --- 
a/openmetadata-service/src/main/java/org/openmetadata/service/search/opensearch/OpenSearchClient.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/search/opensearch/OpenSearchClient.java @@ -23,6 +23,7 @@ import static org.openmetadata.service.search.EntityBuilderConstant.FIELD_COLUMN_NAMES; import static org.openmetadata.service.search.EntityBuilderConstant.FIELD_DISPLAY_NAME_NGRAM; import static org.openmetadata.service.search.EntityBuilderConstant.MAX_AGGREGATE_SIZE; +import static org.openmetadata.service.search.EntityBuilderConstant.MAX_ANALYZED_OFFSET; import static org.openmetadata.service.search.EntityBuilderConstant.MAX_RESULT_HITS; import static org.openmetadata.service.search.EntityBuilderConstant.OWNER_DISPLAY_NAME_KEYWORD; import static org.openmetadata.service.search.EntityBuilderConstant.POST_TAG; @@ -1274,6 +1275,7 @@ private static HighlightBuilder buildHighlights(List fields) { } hb.preTags(PRE_TAG); hb.postTags(POST_TAG); + hb.maxAnalyzerOffset(MAX_ANALYZED_OFFSET); return hb; } diff --git a/openmetadata-service/src/main/resources/elasticsearch/en/test_case_resolution_status_index_mapping.json b/openmetadata-service/src/main/resources/elasticsearch/en/test_case_resolution_status_index_mapping.json index f4fcfdba57d6..55baf4d57a8c 100644 --- a/openmetadata-service/src/main/resources/elasticsearch/en/test_case_resolution_status_index_mapping.json +++ b/openmetadata-service/src/main/resources/elasticsearch/en/test_case_resolution_status_index_mapping.json @@ -659,7 +659,24 @@ } }, "fullyQualifiedName": { - "type": "text" + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } + }, + "entityFQN": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } }, "description": { "type": "text" diff --git 
a/openmetadata-service/src/main/resources/elasticsearch/jp/test_case_resolution_status_index_mapping.json b/openmetadata-service/src/main/resources/elasticsearch/jp/test_case_resolution_status_index_mapping.json index 3421d7d18aed..68043e988de0 100644 --- a/openmetadata-service/src/main/resources/elasticsearch/jp/test_case_resolution_status_index_mapping.json +++ b/openmetadata-service/src/main/resources/elasticsearch/jp/test_case_resolution_status_index_mapping.json @@ -620,7 +620,24 @@ } }, "fullyQualifiedName": { - "type": "text" + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } + }, + "entityFQN": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } }, "description": { "type": "text" diff --git a/openmetadata-service/src/main/resources/elasticsearch/zh/test_case_resolution_status_index_mapping.json b/openmetadata-service/src/main/resources/elasticsearch/zh/test_case_resolution_status_index_mapping.json index 510e9cb9b234..a777c9c37859 100644 --- a/openmetadata-service/src/main/resources/elasticsearch/zh/test_case_resolution_status_index_mapping.json +++ b/openmetadata-service/src/main/resources/elasticsearch/zh/test_case_resolution_status_index_mapping.json @@ -610,7 +610,24 @@ } }, "fullyQualifiedName": { - "type": "text" + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } + }, + "entityFQN": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "normalizer": "lowercase_normalizer", + "ignore_above": 256 + } + } }, "description": { "type": "text" diff --git a/openmetadata-service/src/main/resources/json/data/database/databaseCsvDocumentation.json b/openmetadata-service/src/main/resources/json/data/database/databaseCsvDocumentation.json index 6357f94217f9..90b7c5fb1221 100644 --- 
a/openmetadata-service/src/main/resources/json/data/database/databaseCsvDocumentation.json +++ b/openmetadata-service/src/main/resources/json/data/database/databaseCsvDocumentation.json @@ -84,6 +84,23 @@ "examples": [ "Marketing", "Sales" ] + }, + { + "name": "extension", + "required": false, + "description": "Custom property values added to the glossary term. Each field value (property and its value) is separated by `;` and internal values can be separated by `|`. For `entityReferenceList` type property, pass `type1:fqn1|type2:fqn2`. For single `entityReference` type property, pass `type:fqn`. Similarly, for `enumMultiSelect`, pass values separated by `|`, and for `enumSingleSelect`, pass a single value along with the property name. For `timeInterval` property type, pass the `startTime:endTime` to the property name. If the field value itself contains delimiter values like `,` and `;` or newline they need to be quoted, and the quotation needs to be further escaped. In general, if passing multiple field values separated by `;`, the extension column value needs to be quoted.", + "examples": [ + "`customAttribute1:value1;customAttribute2:value2`", + "`\"dateCp:18-09-2024;dateTimeCp:18-09-2024 01:09:34;durationCp:PT5H30M10S;emailCp:admin@open-metadata.org\"`", + "`entRefListCp:searchIndex:elasticsearch_sample.table_search_index|databaseSchema:Glue.default.information_schema|databaseSchema:sample_data.ecommerce_db.shopify|database:Glue.default|`", + "`\"entRefCp:user:\"\"aaron.singh2\"\"\"`", + "`\"enumMultiSelectCp:val3|val2|val1|val4|val5;enumSingleSelectCp:singleVal1\"`", + "`\"timeCp:10:08:45;timeIntervalCp:1726142300000:17261420000;timeStampCp:1726142400000\"`", + "`\"integerCp:7777;numberCp:123456\"`", + "`\"\"\"queryCp:select col,row from table where id ='30';\"\";stringcp:sample string content\"`", + "`markdownCp:# Sample Markdown Text`", + "\"\"\"tableCp:row_1_col1_Value,row_1_col2_Value,row_1_col3_Value\"\"\"" + ] } ] } \ No newline at end of file diff --git 
a/openmetadata-service/src/main/resources/json/data/databaseSchema/databaseSchemaCsvDocumentation.json b/openmetadata-service/src/main/resources/json/data/databaseSchema/databaseSchemaCsvDocumentation.json index 55d0fd342eaf..32bd6c12f69b 100644 --- a/openmetadata-service/src/main/resources/json/data/databaseSchema/databaseSchemaCsvDocumentation.json +++ b/openmetadata-service/src/main/resources/json/data/databaseSchema/databaseSchemaCsvDocumentation.json @@ -84,6 +84,23 @@ "examples": [ "Marketing", "Sales" ] + }, + { + "name": "extension", + "required": false, + "description": "Custom property values added to the glossary term. Each field value (property and its value) is separated by `;` and internal values can be separated by `|`. For `entityReferenceList` type property, pass `type1:fqn1|type2:fqn2`. For single `entityReference` type property, pass `type:fqn`. Similarly, for `enumMultiSelect`, pass values separated by `|`, and for `enumSingleSelect`, pass a single value along with the property name. For `timeInterval` property type, pass the `startTime:endTime` to the property name. If the field value itself contains delimiter values like `,` and `;` or newline they need to be quoted, and the quotation needs to be further escaped. 
In general, if passing multiple field values separated by `;`, the extension column value needs to be quoted.", + "examples": [ + "`customAttribute1:value1;customAttribute2:value2`", + "`\"dateCp:18-09-2024;dateTimeCp:18-09-2024 01:09:34;durationCp:PT5H30M10S;emailCp:admin@open-metadata.org\"`", + "`entRefListCp:searchIndex:elasticsearch_sample.table_search_index|databaseSchema:Glue.default.information_schema|databaseSchema:sample_data.ecommerce_db.shopify|database:Glue.default|`", + "`\"entRefCp:user:\"\"aaron.singh2\"\"\"`", + "`\"enumMultiSelectCp:val3|val2|val1|val4|val5;enumSingleSelectCp:singleVal1\"`", + "`\"timeCp:10:08:45;timeIntervalCp:1726142300000:17261420000;timeStampCp:1726142400000\"`", + "`\"integerCp:7777;numberCp:123456\"`", + "`\"\"\"queryCp:select col,row from table where id ='30';\"\";stringcp:sample string content\"`", + "`markdownCp:# Sample Markdown Text`", + "\"\"\"tableCp:row_1_col1_Value,row_1_col2_Value,row_1_col3_Value\"\"\"" + ] } ] } \ No newline at end of file diff --git a/openmetadata-service/src/main/resources/json/data/databaseService/databaseServiceCsvDocumentation.json b/openmetadata-service/src/main/resources/json/data/databaseService/databaseServiceCsvDocumentation.json index 6c4346b69256..2b643158d134 100644 --- a/openmetadata-service/src/main/resources/json/data/databaseService/databaseServiceCsvDocumentation.json +++ b/openmetadata-service/src/main/resources/json/data/databaseService/databaseServiceCsvDocumentation.json @@ -68,6 +68,23 @@ "examples": [ "Marketing", "Sales" ] + }, + { + "name": "extension", + "required": false, + "description": "Custom property values added to the glossary term. Each field value (property and its value) is separated by `;` and internal values can be separated by `|`. For `entityReferenceList` type property, pass `type1:fqn1|type2:fqn2`. For single `entityReference` type property, pass `type:fqn`. 
Similarly, for `enumMultiSelect`, pass values separated by `|`, and for `enumSingleSelect`, pass a single value along with the property name. For `timeInterval` property type, pass the `startTime:endTime` to the property name. If the field value itself contains delimiter values like `,` and `;` or newline they need to be quoted, and the quotation needs to be further escaped. In general, if passing multiple field values separated by `;`, the extension column value needs to be quoted.", + "examples": [ + "`customAttribute1:value1;customAttribute2:value2`", + "`\"dateCp:18-09-2024;dateTimeCp:18-09-2024 01:09:34;durationCp:PT5H30M10S;emailCp:admin@open-metadata.org\"`", + "`entRefListCp:searchIndex:elasticsearch_sample.table_search_index|databaseSchema:Glue.default.information_schema|databaseSchema:sample_data.ecommerce_db.shopify|database:Glue.default|`", + "`\"entRefCp:user:\"\"aaron.singh2\"\"\"`", + "`\"enumMultiSelectCp:val3|val2|val1|val4|val5;enumSingleSelectCp:singleVal1\"`", + "`\"timeCp:10:08:45;timeIntervalCp:1726142300000:17261420000;timeStampCp:1726142400000\"`", + "`\"integerCp:7777;numberCp:123456\"`", + "`\"\"\"queryCp:select col,row from table where id ='30';\"\";stringcp:sample string content\"`", + "`markdownCp:# Sample Markdown Text`", + "\"\"\"tableCp:row_1_col1_Value,row_1_col2_Value,row_1_col3_Value\"\"\"" + ] } ] } \ No newline at end of file diff --git a/openmetadata-service/src/main/resources/json/data/glossary/glossaryCsvDocumentation.json b/openmetadata-service/src/main/resources/json/data/glossary/glossaryCsvDocumentation.json index 47750c710b29..0890452ad13d 100644 --- a/openmetadata-service/src/main/resources/json/data/glossary/glossaryCsvDocumentation.json +++ b/openmetadata-service/src/main/resources/json/data/glossary/glossaryCsvDocumentation.json @@ -108,7 +108,8 @@ "`\"timeCp:10:08:45;timeIntervalCp:1726142300000:17261420000;timeStampCp:1726142400000\"`", "`\"integerCp:7777;numberCp:123456\"`", "`\"\"\"queryCp:select col,row from 
table where id ='30';\"\";stringcp:sample string content\"`", - "`markdownCp:# Sample Markdown Text`" + "`markdownCp:# Sample Markdown Text`", + "\"\"\"tableCp:row_1_col1_Value,row_1_col2_Value,row_1_col3_Value\"\"\"" ] } ] diff --git a/openmetadata-service/src/main/resources/json/data/tags/certification.json b/openmetadata-service/src/main/resources/json/data/tags/certification.json new file mode 100644 index 000000000000..b74aac7681f6 --- /dev/null +++ b/openmetadata-service/src/main/resources/json/data/tags/certification.json @@ -0,0 +1,31 @@ +{ + "createClassification": { + "name": "Certification", + "description": "Certifying Data Asset will provide the users with a clear idea of how reliable a Data Asset is.", + "provider": "system", + "mutuallyExclusive": "true" + }, + "createTags": [ + { + "name": "Bronze", + "description": "Bronze certified Data Asset.", + "style": { + "color": "#CD7F32" + } + }, + { + "name": "Silver", + "description": "Silver certified Data Asset.", + "style": { + "color": "#C0C0C0" + } + }, + { + "name": "Gold", + "description": "Gold certified Data Asset.", + "style": { + "color": "#FFD700" + } + } + ] +} diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/EntityResourceTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/resources/EntityResourceTest.java index ad387a12b48d..3922e5237e77 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/EntityResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/EntityResourceTest.java @@ -26,6 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.openmetadata.common.utils.CommonUtil.listOf; import static org.openmetadata.common.utils.CommonUtil.listOrEmpty; import static 
org.openmetadata.common.utils.CommonUtil.nullOrEmpty; @@ -128,6 +129,7 @@ import org.openmetadata.schema.api.teams.CreateTeam; import org.openmetadata.schema.api.teams.CreateTeam.TeamType; import org.openmetadata.schema.api.tests.CreateTestSuite; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.dataInsight.DataInsightChart; import org.openmetadata.schema.dataInsight.type.KpiTarget; import org.openmetadata.schema.entities.docStore.Document; @@ -155,11 +157,14 @@ import org.openmetadata.schema.entity.type.Category; import org.openmetadata.schema.entity.type.CustomProperty; import org.openmetadata.schema.entity.type.Style; +import org.openmetadata.schema.settings.Settings; +import org.openmetadata.schema.settings.SettingsType; import org.openmetadata.schema.tests.TestDefinition; import org.openmetadata.schema.tests.TestSuite; import org.openmetadata.schema.type.AccessDetails; import org.openmetadata.schema.type.AnnouncementDetails; import org.openmetadata.schema.type.ApiStatus; +import org.openmetadata.schema.type.AssetCertification; import org.openmetadata.schema.type.ChangeDescription; import org.openmetadata.schema.type.ChangeEvent; import org.openmetadata.schema.type.Column; @@ -178,6 +183,7 @@ import org.openmetadata.service.OpenMetadataApplicationTest; import org.openmetadata.service.exception.CatalogExceptionMessage; import org.openmetadata.service.jdbi3.EntityRepository.EntityUpdater; +import org.openmetadata.service.jdbi3.SystemRepository; import org.openmetadata.service.resources.apis.APICollectionResourceTest; import org.openmetadata.service.resources.bots.BotResourceTest; import org.openmetadata.service.resources.databases.TableResourceTest; @@ -209,6 +215,7 @@ import org.openmetadata.service.search.models.IndexMapping; import org.openmetadata.service.security.SecurityUtil; import org.openmetadata.service.util.EntityUtil; +import org.openmetadata.service.util.FullyQualifiedName; import 
org.openmetadata.service.util.JsonUtils; import org.openmetadata.service.util.ResultList; import org.openmetadata.service.util.TestUtils; @@ -247,6 +254,7 @@ public abstract class EntityResourceTest updateRecords = listOf( String.format( - "s1,dsp1,new-dsc1,user:%s,,,Tier.Tier1,P23DT23H,http://test.com,%s", + "s1,dsp1,new-dsc1,user:%s,,,Tier.Tier1,P23DT23H,http://test.com,%s,", user1, escapeCsv(DOMAIN.getFullyQualifiedName()))); // Update created entity with changes diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/domains/DomainResourceTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/resources/domains/DomainResourceTest.java index 0cfa31d29f49..7054353f2f6c 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/domains/DomainResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/domains/DomainResourceTest.java @@ -10,9 +10,7 @@ import static org.openmetadata.service.util.EntityUtil.fieldDeleted; import static org.openmetadata.service.util.EntityUtil.fieldUpdated; import static org.openmetadata.service.util.TestUtils.ADMIN_AUTH_HEADERS; -import static org.openmetadata.service.util.TestUtils.UpdateType.CHANGE_CONSOLIDATED; import static org.openmetadata.service.util.TestUtils.UpdateType.MINOR_UPDATE; -import static org.openmetadata.service.util.TestUtils.UpdateType.REVERT; import static org.openmetadata.service.util.TestUtils.assertEntityReferenceNames; import static org.openmetadata.service.util.TestUtils.assertListNotNull; import static org.openmetadata.service.util.TestUtils.assertListNull; @@ -71,21 +69,17 @@ void testDomainExpertsUpdate(TestInfo test) throws IOException { fieldDeleted(change, "experts", listOf(USER2.getEntityReference())); domain = updateAndCheckEntity(create, Status.OK, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); - // Add User2 back as expert using PATCH - // Version 0. 
2 - Changes from this PATCH is consolidated with the previous change resulting in - // no change String json = JsonUtils.pojoToJson(domain); domain.withExperts(List.of(USER1.getEntityReference(), USER2.getEntityReference())); - change = getChangeDescription(domain, REVERT); - domain = patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, REVERT, change); + change = getChangeDescription(domain, MINOR_UPDATE); + fieldAdded(change, "experts", listOf(USER2.getEntityReference())); + domain = patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); - // Remove User2 as expert using PATCH - // Version 0.1 - Changes from this PATCH is consolidated with the previous two changes resulting - // in deletion of USER2 json = JsonUtils.pojoToJson(domain); - change = getChangeDescription(domain, REVERT); + change = getChangeDescription(domain, MINOR_UPDATE); + fieldDeleted(change, "experts", listOf(USER2.getEntityReference())); domain.withExperts(List.of(USER1.getEntityReference())); - patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, REVERT, change); + patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test @@ -104,9 +98,9 @@ void testDomainTypeUpdate(TestInfo test) throws IOException { // Changes from this PATCH is consolidated with the previous changes String json = JsonUtils.pojoToJson(domain); domain.withDomainType(DomainType.CONSUMER_ALIGNED); - change = getChangeDescription(domain, CHANGE_CONSOLIDATED); - fieldUpdated(change, "domainType", DomainType.AGGREGATE, DomainType.CONSUMER_ALIGNED); - patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, CHANGE_CONSOLIDATED, change); + change = getChangeDescription(domain, MINOR_UPDATE); + fieldUpdated(change, "domainType", DomainType.SOURCE_ALIGNED, DomainType.CONSUMER_ALIGNED); + patchEntityAndCheck(domain, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/glossary/GlossaryResourceTest.java 
b/openmetadata-service/src/test/java/org/openmetadata/service/resources/glossary/GlossaryResourceTest.java index 248bc9b51fbc..f6543cdaa50a 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/glossary/GlossaryResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/glossary/GlossaryResourceTest.java @@ -37,11 +37,11 @@ import static org.openmetadata.schema.type.TaskType.RequestDescription; import static org.openmetadata.service.security.SecurityUtil.authHeaders; import static org.openmetadata.service.util.EntityUtil.fieldAdded; +import static org.openmetadata.service.util.EntityUtil.fieldDeleted; import static org.openmetadata.service.util.EntityUtil.fieldUpdated; import static org.openmetadata.service.util.EntityUtil.getFqn; import static org.openmetadata.service.util.EntityUtil.toTagLabels; import static org.openmetadata.service.util.TestUtils.ADMIN_AUTH_HEADERS; -import static org.openmetadata.service.util.TestUtils.UpdateType.CHANGE_CONSOLIDATED; import static org.openmetadata.service.util.TestUtils.UpdateType.MINOR_UPDATE; import static org.openmetadata.service.util.TestUtils.assertListNull; import static org.openmetadata.service.util.TestUtils.assertResponse; @@ -168,11 +168,9 @@ void patch_addDeleteReviewers(TestInfo test) throws IOException { glossary.withReviewers(List.of(USER1_REF, USER2_REF)); change = getChangeDescription( - glossary, - CHANGE_CONSOLIDATED); // PATCH operation update is consolidated in a user session - fieldAdded(change, "reviewers", List.of(USER1_REF, USER2_REF)); - glossary = - patchEntityAndCheck(glossary, origJson, ADMIN_AUTH_HEADERS, CHANGE_CONSOLIDATED, change); + glossary, MINOR_UPDATE); // PATCH operation update is consolidated in a user session + fieldAdded(change, "reviewers", List.of(USER2_REF)); + glossary = patchEntityAndCheck(glossary, origJson, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); // Create a glossary term and assign USER2 as a reviewer 
GlossaryTermResourceTest glossaryTermResourceTest = new GlossaryTermResourceTest(); @@ -220,10 +218,9 @@ void patch_addDeleteReviewers(TestInfo test) throws IOException { glossary.withReviewers(List.of(USER2_REF)); change = getChangeDescription( - glossary, - CHANGE_CONSOLIDATED); // PATCH operation update is consolidated in a user session - fieldAdded(change, "reviewers", List.of(USER2_REF)); - patchEntityAndCheck(glossary, origJson, ADMIN_AUTH_HEADERS, CHANGE_CONSOLIDATED, change); + glossary, MINOR_UPDATE); // PATCH operation update is consolidated in a user session + fieldDeleted(change, "reviewers", List.of(USER1_REF)); + patchEntityAndCheck(glossary, origJson, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); // Verify that USER1_REF is removed from the reviewers for the terms inside the glossary GLOSSARY_TERM1 = diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/services/DatabaseServiceResourceTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/resources/services/DatabaseServiceResourceTest.java index f3eee61abe75..8472dd6e7425 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/services/DatabaseServiceResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/services/DatabaseServiceResourceTest.java @@ -15,10 +15,16 @@ import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.OK; +import static org.apache.commons.lang.StringEscapeUtils.escapeCsv; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.openmetadata.common.utils.CommonUtil.listOf; +import static org.openmetadata.csv.CsvUtil.recordToString; +import static org.openmetadata.csv.EntityCsv.entityNotFound; +import static 
org.openmetadata.csv.EntityCsvTest.*; +import static org.openmetadata.csv.EntityCsvTest.assertRows; import static org.openmetadata.service.exception.CatalogExceptionMessage.invalidEnumValue; import static org.openmetadata.service.util.EntityUtil.fieldAdded; import static org.openmetadata.service.util.EntityUtil.fieldUpdated; @@ -35,14 +41,18 @@ import java.util.Map; import java.util.UUID; import javax.ws.rs.client.WebTarget; +import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.apache.http.client.HttpResponseException; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; +import org.openmetadata.csv.EntityCsv; +import org.openmetadata.schema.api.data.CreateDatabase; import org.openmetadata.schema.api.services.CreateDatabaseService; import org.openmetadata.schema.api.services.CreateDatabaseService.DatabaseServiceType; import org.openmetadata.schema.api.services.DatabaseConnection; import org.openmetadata.schema.api.services.ingestionPipelines.CreateIngestionPipeline; +import org.openmetadata.schema.entity.data.Database; import org.openmetadata.schema.entity.services.DatabaseService; import org.openmetadata.schema.entity.services.connections.TestConnectionResult; import org.openmetadata.schema.entity.services.connections.TestConnectionResultStatus; @@ -57,13 +67,18 @@ import org.openmetadata.schema.services.connections.database.RedshiftConnection; import org.openmetadata.schema.services.connections.database.SnowflakeConnection; import org.openmetadata.schema.services.connections.database.common.basicAuth; +import org.openmetadata.schema.type.ApiStatus; import org.openmetadata.schema.type.ChangeDescription; import org.openmetadata.schema.type.Include; import org.openmetadata.schema.type.Schedule; +import org.openmetadata.schema.type.csv.CsvImportResult; import org.openmetadata.service.Entity; +import org.openmetadata.service.jdbi3.DatabaseServiceRepository.DatabaseServiceCsv; +import 
org.openmetadata.service.resources.databases.DatabaseResourceTest; import org.openmetadata.service.resources.services.database.DatabaseServiceResource.DatabaseServiceList; import org.openmetadata.service.resources.services.ingestionpipelines.IngestionPipelineResourceTest; import org.openmetadata.service.secrets.masker.PasswordEntityMasker; +import org.openmetadata.service.util.FullyQualifiedName; import org.openmetadata.service.util.JsonUtils; import org.openmetadata.service.util.TestUtils; @@ -321,6 +336,72 @@ void get_listDatabaseServicesWithInvalidEnumValue_400() { invalidEnumValue(Include.class)); } + @Test + @SneakyThrows + void testImportInvalidCsv() { + DatabaseService service = createEntity(createRequest("invalidCsv"), ADMIN_AUTH_HEADERS); + String serviceName = service.getFullyQualifiedName(); + DatabaseResourceTest databaseTest = new DatabaseResourceTest(); + CreateDatabase createDatabase = databaseTest.createRequest("s1").withService(serviceName); + databaseTest.createEntity(createDatabase, ADMIN_AUTH_HEADERS); + + // Headers: name, displayName, description, owner, tags, glossaryTerms, tiers, domain, extension + // Update database with invalid tags field + String resultsHeader = recordToString(EntityCsv.getResultHeaders(DatabaseServiceCsv.HEADERS)); + String record = "d1,dsp1,dsc1,,Tag.invalidTag,,,,"; + String csv = createCsv(DatabaseServiceCsv.HEADERS, listOf(record), null); + CsvImportResult result = importCsv(serviceName, csv, false); + assertSummary(result, ApiStatus.PARTIAL_SUCCESS, 2, 1, 1); + String[] expectedRows = + new String[] { + resultsHeader, getFailedRecord(record, entityNotFound(4, "tag", "Tag.invalidTag")) + }; + assertRows(result, expectedRows); + + // invalid tag it will give error. 
+ record = "non-existing,dsp1,dsc1,,Tag.invalidTag,,,,"; + csv = createCsv(DatabaseServiceCsv.HEADERS, listOf(record), null); + result = importCsv(serviceName, csv, false); + assertSummary(result, ApiStatus.PARTIAL_SUCCESS, 2, 1, 1); + expectedRows = + new String[] { + resultsHeader, getFailedRecord(record, entityNotFound(4, "tag", "Tag.invalidTag")) + }; + assertRows(result, expectedRows); + + // database will be created if it does not exist + String databaseFqn = FullyQualifiedName.add(serviceName, "non-existing"); + record = "non-existing,dsp1,dsc1,,,,,,"; + csv = createCsv(DatabaseServiceCsv.HEADERS, listOf(record), null); + result = importCsv(serviceName, csv, false); + assertSummary(result, ApiStatus.SUCCESS, 2, 2, 0); + expectedRows = new String[] {resultsHeader, getSuccessRecord(record, "Entity created")}; + assertRows(result, expectedRows); + Database createdDatabase = databaseTest.getEntityByName(databaseFqn, "id", ADMIN_AUTH_HEADERS); + assertEquals(databaseFqn, createdDatabase.getFullyQualifiedName()); + } + + @Test + void testImportExport() throws IOException { + String user1 = USER1.getName(); + DatabaseService service = createEntity(createRequest("importExportTest"), ADMIN_AUTH_HEADERS); + DatabaseResourceTest databaseTest = new DatabaseResourceTest(); + CreateDatabase createDatabase = + databaseTest.createRequest("d1").withService(service.getFullyQualifiedName()); + databaseTest.createEntity(createDatabase, ADMIN_AUTH_HEADERS); + + // Headers: name, displayName, description, owner, tags, glossaryTerms, tiers, domain, extension + // Update terms with change in description + String record = + String.format( + "d1,dsp1,new-dsc1,user:%s,,,Tier.Tier1,%s,", + user1, escapeCsv(DOMAIN.getFullyQualifiedName())); + + // Update created entity with changes + importCsvAndValidate( + service.getFullyQualifiedName(), DatabaseServiceCsv.HEADERS, null, listOf(record)); + } + public DatabaseService putTestConnectionResult( UUID serviceId, TestConnectionResult 
testConnectionResult, Map authHeaders) throws HttpResponseException { diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/system/SystemResourceTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/resources/system/SystemResourceTest.java index 72e4475ee992..53a223359add 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/system/SystemResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/system/SystemResourceTest.java @@ -28,9 +28,12 @@ import org.openmetadata.api.configuration.LogoConfiguration; import org.openmetadata.api.configuration.ThemeConfiguration; import org.openmetadata.api.configuration.UiThemePreference; +import org.openmetadata.schema.api.configuration.LoginConfiguration; import org.openmetadata.schema.api.configuration.profiler.MetricConfigurationDefinition; import org.openmetadata.schema.api.configuration.profiler.ProfilerConfiguration; import org.openmetadata.schema.api.data.*; +import org.openmetadata.schema.api.lineage.LineageSettings; +import org.openmetadata.schema.api.search.SearchSettings; import org.openmetadata.schema.api.services.CreateDashboardService; import org.openmetadata.schema.api.services.CreateDatabaseService; import org.openmetadata.schema.api.services.CreateMessagingService; @@ -42,6 +45,7 @@ import org.openmetadata.schema.api.tests.CreateTestSuite; import org.openmetadata.schema.auth.JWTAuthMechanism; import org.openmetadata.schema.auth.JWTTokenExpiry; +import org.openmetadata.schema.configuration.AssetCertificationSettings; import org.openmetadata.schema.email.SmtpSettings; import org.openmetadata.schema.entity.data.Table; import org.openmetadata.schema.entity.teams.AuthenticationMechanism; @@ -70,6 +74,7 @@ import org.openmetadata.service.resources.services.MlModelServiceResourceTest; import org.openmetadata.service.resources.services.PipelineServiceResourceTest; import 
org.openmetadata.service.resources.services.StorageServiceResourceTest; +import org.openmetadata.service.resources.settings.SettingsCache; import org.openmetadata.service.resources.storages.ContainerResourceTest; import org.openmetadata.service.resources.teams.TeamResourceTest; import org.openmetadata.service.resources.teams.UserResourceTest; @@ -341,6 +346,210 @@ void validate_test() throws HttpResponseException { assertEquals(Boolean.TRUE, response.getMigrations().getPassed()); } + @Test + void testDefaultSettingsInitialization() throws HttpResponseException { + SettingsCache.initialize(config); + Settings emailSettings = getSystemConfig(SettingsType.EMAIL_CONFIGURATION); + Settings uiThemeSettings = getSystemConfig(SettingsType.CUSTOM_UI_THEME_PREFERENCE); + SmtpSettings smtpSettings = + JsonUtils.convertValue(emailSettings.getConfigValue(), SmtpSettings.class); + assertEquals(config.getSmtpSettings().getUsername(), smtpSettings.getUsername()); + assertEquals(config.getSmtpSettings().getEmailingEntity(), smtpSettings.getEmailingEntity()); + UiThemePreference uiThemePreference = + JsonUtils.convertValue(uiThemeSettings.getConfigValue(), UiThemePreference.class); + assertEquals("", uiThemePreference.getCustomTheme().getPrimaryColor()); + assertEquals("", uiThemePreference.getCustomLogoConfig().getCustomLogoUrlPath()); + } + + @Test + void testEmailConfigurationSettings() throws HttpResponseException { + Settings emailSettings = getSystemConfig(SettingsType.EMAIL_CONFIGURATION); + SmtpSettings smtpSettings = + JsonUtils.convertValue(emailSettings.getConfigValue(), SmtpSettings.class); + SmtpSettings expectedSmtpSettings = config.getSmtpSettings(); + expectedSmtpSettings.setPassword( + smtpSettings.getPassword()); // Password is encrypted, so we use the stored one + assertEquals(expectedSmtpSettings, smtpSettings); + smtpSettings.setUsername("updatedUsername"); + smtpSettings.setEmailingEntity("updatedEntity"); + + Settings updatedEmailSettings = + new Settings() + 
.withConfigType(SettingsType.EMAIL_CONFIGURATION) + .withConfigValue(smtpSettings); + + updateSystemConfig(updatedEmailSettings); + + Settings updatedSettings = getSystemConfig(SettingsType.EMAIL_CONFIGURATION); + SmtpSettings updatedSmtpSettings = + JsonUtils.convertValue(updatedSettings.getConfigValue(), SmtpSettings.class); + + assertEquals("updatedUsername", updatedSmtpSettings.getUsername()); + assertEquals("updatedEntity", updatedSmtpSettings.getEmailingEntity()); + } + + @Order(3) + @Test + void testUiThemePreferenceSettings() throws HttpResponseException { + Settings uiThemeSettings = getSystemConfig(SettingsType.CUSTOM_UI_THEME_PREFERENCE); + UiThemePreference uiThemePreference = + JsonUtils.convertValue(uiThemeSettings.getConfigValue(), UiThemePreference.class); + assertEquals("", uiThemePreference.getCustomTheme().getPrimaryColor()); + assertEquals("", uiThemePreference.getCustomLogoConfig().getCustomLogoUrlPath()); + + uiThemePreference.getCustomTheme().setPrimaryColor("#FFFFFF"); + uiThemePreference.getCustomLogoConfig().setCustomLogoUrlPath("http://example.com/logo.png"); + + Settings updatedUiThemeSettings = + new Settings() + .withConfigType(SettingsType.CUSTOM_UI_THEME_PREFERENCE) + .withConfigValue(uiThemePreference); + + updateSystemConfig(updatedUiThemeSettings); + + Settings updatedSettings = getSystemConfig(SettingsType.CUSTOM_UI_THEME_PREFERENCE); + UiThemePreference updatedUiThemePreference = + JsonUtils.convertValue(updatedSettings.getConfigValue(), UiThemePreference.class); + + assertEquals("#FFFFFF", updatedUiThemePreference.getCustomTheme().getPrimaryColor()); + assertEquals( + "http://example.com/logo.png", + updatedUiThemePreference.getCustomLogoConfig().getCustomLogoUrlPath()); + // reset to default + uiThemePreference.getCustomTheme().setPrimaryColor(""); + uiThemePreference.getCustomLogoConfig().setCustomLogoUrlPath(""); + updatedUiThemeSettings = + new Settings() + .withConfigType(SettingsType.CUSTOM_UI_THEME_PREFERENCE) + 
.withConfigValue(uiThemePreference); + updateSystemConfig(updatedUiThemeSettings); + } + + @Test + void testLoginConfigurationSettings() throws HttpResponseException { + // Retrieve the default login configuration settings + Settings loginSettings = getSystemConfig(SettingsType.LOGIN_CONFIGURATION); + LoginConfiguration loginConfig = + JsonUtils.convertValue(loginSettings.getConfigValue(), LoginConfiguration.class); + + // Assert default values + assertEquals(3, loginConfig.getMaxLoginFailAttempts()); + assertEquals(600, loginConfig.getAccessBlockTime()); + assertEquals(3600, loginConfig.getJwtTokenExpiryTime()); + + // Update login configuration + loginConfig.setMaxLoginFailAttempts(5); + loginConfig.setAccessBlockTime(300); + loginConfig.setJwtTokenExpiryTime(7200); + + Settings updatedLoginSettings = + new Settings() + .withConfigType(SettingsType.LOGIN_CONFIGURATION) + .withConfigValue(loginConfig); + + updateSystemConfig(updatedLoginSettings); + + // Retrieve the updated settings + Settings updatedSettings = getSystemConfig(SettingsType.LOGIN_CONFIGURATION); + LoginConfiguration updatedLoginConfig = + JsonUtils.convertValue(updatedSettings.getConfigValue(), LoginConfiguration.class); + + // Assert updated values + assertEquals(5, updatedLoginConfig.getMaxLoginFailAttempts()); + assertEquals(300, updatedLoginConfig.getAccessBlockTime()); + assertEquals(7200, updatedLoginConfig.getJwtTokenExpiryTime()); + } + + @Test + void testSearchSettings() throws HttpResponseException { + // Retrieve the default search settings + Settings searchSettings = getSystemConfig(SettingsType.SEARCH_SETTINGS); + SearchSettings searchConfig = + JsonUtils.convertValue(searchSettings.getConfigValue(), SearchSettings.class); + + // Assert default values + assertEquals(false, searchConfig.getEnableAccessControl()); + + // Update search settings + searchConfig.setEnableAccessControl(true); + + Settings updatedSearchSettings = + new 
Settings().withConfigType(SettingsType.SEARCH_SETTINGS).withConfigValue(searchConfig); + + updateSystemConfig(updatedSearchSettings); + + // Retrieve the updated settings + Settings updatedSettings = getSystemConfig(SettingsType.SEARCH_SETTINGS); + SearchSettings updatedSearchConfig = + JsonUtils.convertValue(updatedSettings.getConfigValue(), SearchSettings.class); + + // Assert updated values + assertEquals(true, updatedSearchConfig.getEnableAccessControl()); + } + + @Test + void testAssetCertificationSettings() throws HttpResponseException { + // Retrieve the default asset certification settings + Settings certificationSettings = getSystemConfig(SettingsType.ASSET_CERTIFICATION_SETTINGS); + AssetCertificationSettings certificationConfig = + JsonUtils.convertValue( + certificationSettings.getConfigValue(), AssetCertificationSettings.class); + + // Assert default values + assertEquals("Certification", certificationConfig.getAllowedClassification()); + assertEquals("P30D", certificationConfig.getValidityPeriod()); + + // Update asset certification settings + certificationConfig.setAllowedClassification("NewCertification"); + certificationConfig.setValidityPeriod("P60D"); + + Settings updatedCertificationSettings = + new Settings() + .withConfigType(SettingsType.ASSET_CERTIFICATION_SETTINGS) + .withConfigValue(certificationConfig); + + updateSystemConfig(updatedCertificationSettings); + + // Retrieve the updated settings + Settings updatedSettings = getSystemConfig(SettingsType.ASSET_CERTIFICATION_SETTINGS); + AssetCertificationSettings updatedCertificationConfig = + JsonUtils.convertValue(updatedSettings.getConfigValue(), AssetCertificationSettings.class); + + // Assert updated values + assertEquals("NewCertification", updatedCertificationConfig.getAllowedClassification()); + assertEquals("P60D", updatedCertificationConfig.getValidityPeriod()); + } + + @Test + void testLineageSettings() throws HttpResponseException { + // Retrieve the default lineage settings + 
Settings lineageSettings = getSystemConfig(SettingsType.LINEAGE_SETTINGS); + LineageSettings lineageConfig = + JsonUtils.convertValue(lineageSettings.getConfigValue(), LineageSettings.class); + + // Assert default values + assertEquals(2, lineageConfig.getUpstreamDepth()); + assertEquals(2, lineageConfig.getDownstreamDepth()); + + // Update lineage settings + lineageConfig.setUpstreamDepth(3); + lineageConfig.setDownstreamDepth(4); + + Settings updatedLineageSettings = + new Settings().withConfigType(SettingsType.LINEAGE_SETTINGS).withConfigValue(lineageConfig); + + updateSystemConfig(updatedLineageSettings); + + // Retrieve the updated settings + Settings updatedSettings = getSystemConfig(SettingsType.LINEAGE_SETTINGS); + LineageSettings updatedLineageConfig = + JsonUtils.convertValue(updatedSettings.getConfigValue(), LineageSettings.class); + + // Assert updated values + assertEquals(3, updatedLineageConfig.getUpstreamDepth()); + assertEquals(4, updatedLineageConfig.getDownstreamDepth()); + } + @Test void globalProfilerConfig(TestInfo test) throws HttpResponseException { // Create a profiler config diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/resources/teams/TeamResourceTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/resources/teams/TeamResourceTest.java index e766f1db028f..96f9e2438c39 100644 --- a/openmetadata-service/src/test/java/org/openmetadata/service/resources/teams/TeamResourceTest.java +++ b/openmetadata-service/src/test/java/org/openmetadata/service/resources/teams/TeamResourceTest.java @@ -52,10 +52,8 @@ import static org.openmetadata.service.util.TestUtils.TEST_AUTH_HEADERS; import static org.openmetadata.service.util.TestUtils.TEST_USER_NAME; import static org.openmetadata.service.util.TestUtils.USER_WITH_CREATE_HEADERS; -import static org.openmetadata.service.util.TestUtils.UpdateType.CHANGE_CONSOLIDATED; import static org.openmetadata.service.util.TestUtils.UpdateType.MINOR_UPDATE; import static 
org.openmetadata.service.util.TestUtils.UpdateType.NO_CHANGE; -import static org.openmetadata.service.util.TestUtils.UpdateType.REVERT; import static org.openmetadata.service.util.TestUtils.assertListNotNull; import static org.openmetadata.service.util.TestUtils.assertResponse; import static org.openmetadata.service.util.TestUtils.validateEntityReferences; @@ -600,11 +598,12 @@ void put_patch_hierarchicalTeams() throws IOException { bu2 = updateAndCheckEntity(create, OK, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); // Change bu2 parent from Organization to bu1 using PATCH operation. - // Change from this PATCH is combined with the previous PUT resulting in no change String json = JsonUtils.pojoToJson(bu2); - change = getChangeDescription(bu2, REVERT); + change = getChangeDescription(bu2, MINOR_UPDATE); bu2.setParents(List.of(bu1.getEntityReference())); - patchEntityAndCheck(bu2, json, ADMIN_AUTH_HEADERS, REVERT, change); + fieldAdded(change, "parents", List.of(bu1.getEntityReference())); + fieldDeleted(change, "parents", List.of(ORG_TEAM.getEntityReference())); + patchEntityAndCheck(bu2, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test @@ -626,11 +625,11 @@ void patch_isJoinable_200(TestInfo test) throws IOException { fieldUpdated(change, "isJoinable", false, true); team = patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); - // set isJoinable to false - change from this PATCH and the previous are consolidated resulting - // in no change json = JsonUtils.pojoToJson(team); team.setIsJoinable(false); - patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, NO_CHANGE, null); + change = getChangeDescription(team, MINOR_UPDATE); + fieldUpdated(change, "isJoinable", true, false); + patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test @@ -672,10 +671,9 @@ void patch_deleteUserAndDefaultRolesFromTeam_200(TestInfo test) throws IOExcepti int removeDefaultRoleIndex = new Random().nextInt(roles.size()); EntityReference 
deletedRole = team.getDefaultRoles().get(removeDefaultRoleIndex); team.getDefaultRoles().remove(removeDefaultRoleIndex); - change = getChangeDescription(team, CHANGE_CONSOLIDATED); - fieldDeleted(change, "users", CommonUtil.listOf(deletedUser)); + change = getChangeDescription(team, MINOR_UPDATE); fieldDeleted(change, "defaultRoles", CommonUtil.listOf(deletedRole)); - patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, CHANGE_CONSOLIDATED, change); + patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test @@ -725,8 +723,10 @@ void patch_teamWithPolicies(TestInfo test) throws IOException { // resulting in no change json = JsonUtils.pojoToJson(team); team.withPolicies(null); - change = getChangeDescription(team, REVERT); - patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, REVERT, change); + change = getChangeDescription(team, MINOR_UPDATE); + fieldDeleted( + change, "policies", List.of(POLICY1.getEntityReference(), POLICY2.getEntityReference())); + patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test @@ -768,13 +768,11 @@ void patch_ProfileWithSubscription(TestInfo test) throws IOException, URISyntaxE fieldUpdated(change, "profile", PROFILE, profile1); team = patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); - // Remove profile from the team - Change from this PATCH and previous are consolidated to no - // change json = JsonUtils.pojoToJson(team); team.withProfile(null); - change = getChangeDescription(team, CHANGE_CONSOLIDATED); - fieldDeleted(change, "profile", PROFILE); - patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, CHANGE_CONSOLIDATED, change); + change = getChangeDescription(team, MINOR_UPDATE); + fieldDeleted(change, "profile", profile1); + patchEntityAndCheck(team, json, ADMIN_AUTH_HEADERS, MINOR_UPDATE, change); } @Test diff --git a/openmetadata-spec/pom.xml b/openmetadata-spec/pom.xml index 63c297fd23d4..cfaea4fe7159 100644 --- a/openmetadata-spec/pom.xml +++ 
b/openmetadata-spec/pom.xml @@ -15,6 +15,8 @@ 17 17 2.0.12-1 + 3.6.0 + 3.3.1 diff --git a/openmetadata-spec/src/main/java/org/openmetadata/schema/EntityInterface.java b/openmetadata-spec/src/main/java/org/openmetadata/schema/EntityInterface.java index ed0051ab141c..7414dc92e755 100644 --- a/openmetadata-spec/src/main/java/org/openmetadata/schema/EntityInterface.java +++ b/openmetadata-spec/src/main/java/org/openmetadata/schema/EntityInterface.java @@ -108,6 +108,10 @@ default LifeCycle getLifeCycle() { return null; } + default AssetCertification getCertification() { + return null; + } + void setId(UUID id); void setDescription(String description); @@ -178,6 +182,10 @@ default void setLifeCycle(LifeCycle lifeCycle) { /* no-op implementation to be overridden */ } + default void setCertification(AssetCertification certification) { + /* no-op implementation to be overridden */ + } + T withHref(URI href); @JsonIgnore diff --git a/openmetadata-spec/src/main/resources/json/schema/configuration/assetCertificationSettings.json b/openmetadata-spec/src/main/resources/json/schema/configuration/assetCertificationSettings.json new file mode 100644 index 000000000000..17e2cfba42b4 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/configuration/assetCertificationSettings.json @@ -0,0 +1,20 @@ +{ + "$id": "https://open-metadata.org/schema/configuration/assetCertificationSettings.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "AssetCertificationSettings", + "description": "This schema defines the Asset Certification Settings.", + "type": "object", + "javaType": "org.openmetadata.schema.configuration.AssetCertificationSettings", + "properties": { + "allowedClassification": { + "type": "string", + "description": "Classification that can be used for certifications." + }, + "validityPeriod": { + "type": "string", + "description": "ISO 8601 duration for the validity period." 
+ } + }, + "required": ["allowedClassification", "validityPeriod"], + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/configuration/limitsConfiguration.json b/openmetadata-spec/src/main/resources/json/schema/configuration/limitsConfiguration.json index e9ee55cd7c07..17b372354b77 100644 --- a/openmetadata-spec/src/main/resources/json/schema/configuration/limitsConfiguration.json +++ b/openmetadata-spec/src/main/resources/json/schema/configuration/limitsConfiguration.json @@ -1,7 +1,7 @@ { "$id": "https://open-metadata.org/schema/entity/configuration/limitsConfiguration.json", "$schema": "http://json-schema.org/draft-07/schema#", - "title": "FernetConfiguration", + "title": "LimitsConfiguration", "description": "This schema defines the Limits Configuration.", "type": "object", "javaType": "org.openmetadata.schema.configuration.LimitsConfiguration", diff --git a/openmetadata-spec/src/main/resources/json/schema/configuration/lineageSettings.json b/openmetadata-spec/src/main/resources/json/schema/configuration/lineageSettings.json new file mode 100644 index 000000000000..64b7664a8203 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/configuration/lineageSettings.json @@ -0,0 +1,43 @@ +{ + "$id": "https://open-metadata.org/schema/entity/configuration/lineageSettings.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "LineageSettings", + "description": "This schema defines the Lineage Settings.", + "type": "object", + "javaType": "org.openmetadata.schema.api.lineage.LineageSettings", + "definitions": { + "lineageLayer": { + "javaType": "org.openmetadata.schema.api.lineage.LineageLayer", + "description": "Lineage Layers", + "type": "string", + "enum": [ + "EntityLineage", + "ColumnLevelLineage", + "DataObservability" + ], + "default": "EntityLineage" + } + }, + "properties": { + "upstreamDepth": { + "description": "Upstream Depth for Lineage.", + "type": 
"integer", + "default": 2, + "minimum": 1, + "maximum": 5 + }, + "downstreamDepth": { + "description": "DownStream Depth for Lineage.", + "type": "integer", + "default": 2, + "minimum": 1, + "maximum": 5 + }, + "lineageLayer": { + "description": "Lineage Layer.", + "$ref": "#/definitions/lineageLayer" + } + }, + "required": ["upstreamDepth", "downstreamDepth", "lineageLayer"], + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/configuration/searchSettings.json b/openmetadata-spec/src/main/resources/json/schema/configuration/searchSettings.json index a03f4bccb376..1441f4f472b9 100644 --- a/openmetadata-spec/src/main/resources/json/schema/configuration/searchSettings.json +++ b/openmetadata-spec/src/main/resources/json/schema/configuration/searchSettings.json @@ -4,7 +4,7 @@ "title": "SearchSettings", "description": "This schema defines the Rbac Search Configuration.", "type": "object", - "javaType": "org.openmetadata.schema.api.searcg.SearchSettings", + "javaType": "org.openmetadata.schema.api.search.SearchSettings", "properties": { "enableAccessControl": { "type": "boolean", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/apiCollection.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/apiCollection.json index f39bf6481ad9..c23d64bc34ec 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/apiCollection.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/apiCollection.json @@ -105,6 +105,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/apiEndpoint.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/apiEndpoint.json index 
dcbb280247ff..481485d5d8c3 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/apiEndpoint.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/apiEndpoint.json @@ -167,6 +167,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/chart.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/chart.json index 61f110da93bf..425a3ae9b899 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/chart.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/chart.json @@ -155,6 +155,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/container.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/container.json index 3c29afe3119d..b1ad94e9f582 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/container.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/container.json @@ -189,6 +189,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboard.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboard.json index fcc644b729ac..9d1d0019ec98 100644 --- 
a/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboard.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboard.json @@ -143,6 +143,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboardDataModel.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboardDataModel.json index eaa6aa0cbc43..1831dc3006c2 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboardDataModel.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/dashboardDataModel.json @@ -166,6 +166,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/database.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/database.json index 1cbea85c665d..bcc0f601d07a 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/database.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/database.json @@ -121,6 +121,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/databaseSchema.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/databaseSchema.json index 724bae7004f1..ceb4c41b4442 100644 --- 
a/openmetadata-spec/src/main/resources/json/schema/entity/data/databaseSchema.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/databaseSchema.json @@ -117,6 +117,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/metric.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/metric.json index 59005273b0b3..6840f1928721 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/metric.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/metric.json @@ -195,6 +195,9 @@ "extension": { "description": "Entity extension data with custom attributes added to the entity.", "$ref": "../../type/basic.json#/definitions/entityExtension" + }, + "certification": { + "$ref": "../../type/assetCertification.json" } }, "required": ["id", "name"], diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/mlmodel.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/mlmodel.json index 1281b819f934..6d9621a28cd9 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/mlmodel.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/mlmodel.json @@ -285,6 +285,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/pipeline.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/pipeline.json index 92fc0dd6433e..72456dbe0fbf 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/pipeline.json +++ 
b/openmetadata-spec/src/main/resources/json/schema/entity/data/pipeline.json @@ -271,6 +271,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/searchIndex.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/searchIndex.json index 8a3d1be2baee..062811153769 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/searchIndex.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/searchIndex.json @@ -248,6 +248,9 @@ "description": "Life Cycle of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/storedProcedure.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/storedProcedure.json index b4cdffbe6235..70ef8b5f0f6c 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/storedProcedure.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/storedProcedure.json @@ -158,6 +158,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/table.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/table.json index 3d20fcda1a3d..9e8a93917dfa 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/table.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/table.json @@ -1121,6 
+1121,9 @@ "description": "Life Cycle of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/data/topic.json b/openmetadata-spec/src/main/resources/json/schema/entity/data/topic.json index c3381b0c23ec..f153ce2dfc59 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/data/topic.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/data/topic.json @@ -174,6 +174,9 @@ "description": "Life Cycle properties of the entity", "$ref": "../../type/lifeCycle.json" }, + "certification": { + "$ref": "../../type/assetCertification.json" + }, "sourceHash": { "description": "Source hash of the entity", "type": "string", diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/exasolConnection.json b/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/exasolConnection.json new file mode 100644 index 000000000000..f78125c774e6 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/entity/services/connections/database/exasolConnection.json @@ -0,0 +1,86 @@ +{ + "$id": "https://open-metadata.org/schema/entity/services/connections/database/exasolConnection.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ExasolConnection", + "description": "Exasol Database Connection Config", + "type": "object", + "javaType": "org.openmetadata.schema.services.connections.database.ExasolConnection", + "definitions": { + "exasolType": { + "description": "Service type.", + "type": "string", + "enum": [ + "Exasol" + ], + "default": "Exasol" + }, + "exasolScheme": { + "description": "SQLAlchemy driver scheme options.", + "type": "string", + "enum": [ + "exa+websocket" + ], + "default": "exa+websocket" + } + }, + "properties": { + "type": { + 
"title": "Service Type", + "description": "Service Type", + "$ref": "#/definitions/exasolType", + "default": "Exasol" + }, + "scheme": { + "title": "Connection Scheme", + "description": "SQLAlchemy driver scheme options.", + "$ref": "#/definitions/exasolScheme", + "default": "exa+websocket" + }, + "username": { + "title": "Username", + "description": "Username to connect to Exasol. This user should have privileges to read all the metadata in Exasol.", + "type": "string" + }, + "password": { + "title": "Password", + "description": "Password to connect to Exasol.", + "type": "string", + "format": "password" + }, + "hostPort": { + "title": "Host and Port", + "description": "Host and port of the source service.", + "type": "string", + "default": "127.0.0.1:8563" + }, + "tls": { + "title": "SSL/TLS Settings", + "description": "Client SSL/TLS settings.", + "type": "string", + "enum": [ + "disable-tls", + "ignore-certificate", + "validate-certificate" + ], + "default": "validate-certificate" + }, + "connectionOptions": { + "title": "Connection Options", + "$ref": "../connectionBasicType.json#/definitions/connectionOptions" + }, + "connectionArguments": { + "title": "Connection Arguments", + "$ref": "../connectionBasicType.json#/definitions/connectionArguments" + }, + "supportsMetadataExtraction": { + "title": "Supports Metadata Extraction", + "$ref": "../connectionBasicType.json#/definitions/supportsMetadataExtraction" + } + }, + "additionalProperties": false, + "required": [ + "hostPort", + "username", + "password" + ] +} diff --git a/openmetadata-spec/src/main/resources/json/schema/entity/services/databaseService.json b/openmetadata-spec/src/main/resources/json/schema/entity/services/databaseService.json index c17f413c563c..205324eac38c 100644 --- a/openmetadata-spec/src/main/resources/json/schema/entity/services/databaseService.json +++ b/openmetadata-spec/src/main/resources/json/schema/entity/services/databaseService.json @@ -57,7 +57,8 @@ "Iceberg", "Teradata", 
"SapErp", - "Synapse" + "Synapse", + "Exasol" ], "javaEnums": [ { @@ -188,6 +189,9 @@ }, { "name": "Synapse" + }, + { + "name": "Exasol" } ] }, @@ -323,6 +327,9 @@ }, { "$ref": "./connections/database/synapseConnection.json" + }, + { + "$ref": "./connections/database/exasolConnection.json" } ] } diff --git a/openmetadata-spec/src/main/resources/json/schema/events/api/eventSubscriptionDiagnosticInfo.json b/openmetadata-spec/src/main/resources/json/schema/events/api/eventSubscriptionDiagnosticInfo.json new file mode 100644 index 000000000000..5fb1ffd030db --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/events/api/eventSubscriptionDiagnosticInfo.json @@ -0,0 +1,35 @@ +{ + "$id": "https://open-metadata.org/schema/events/api/eventSubscriptionDiagnosticInfo.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Event Subscription Status Response", + "description": "Schema defining the response for event subscription status, including details about processed and unprocessed events.", + "type": "object", + "javaType": "org.openmetadata.schema.api.events.EventSubscriptionDiagnosticInfo", + "properties": { + "latestOffset": { + "description": "The latest offset of the event in the system.", + "existingJavaType": "java.lang.Long" + }, + "currentOffset": { + "description": "The current offset of the event subscription.", + "existingJavaType": "java.lang.Long" + }, + "hasProcessedAllEvents": { + "description": "Indicates whether all events have been processed.", + "type": "boolean" + }, + "unprocessedEventsCount": { + "description": "The count of unprocessed events.", + "existingJavaType": "java.lang.Long" + }, + "unprocessedEventsList": { + "description": "The list of unprocessed events.", + "type": "array", + "items": { + "$ref": "../../type/changeEvent.json" + + } + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/events/api/typedEvent.json 
b/openmetadata-spec/src/main/resources/json/schema/events/api/typedEvent.json new file mode 100644 index 000000000000..aa33e26d5264 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/events/api/typedEvent.json @@ -0,0 +1,36 @@ +{ + "$id": "https://open-metadata.org/schema/events/api/typedEvent.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Typed Event", + "description": "Schema defining a Typed Event with its status, data, and timestamp.", + "type": "object", + "javaType": "org.openmetadata.service.resources.events.subscription.TypedEvent", + "properties": { + "status": { + "description": "The status of the event, such as 'failed', 'successful', or 'unprocessed'.", + "type": "string", + "enum": ["failed", "successful", "unprocessed"] + }, + "data": { + "description": "The event data, which can be of different types depending on the status.", + "type": "array", + "items": { + "oneOf": [ + { + "$ref": "../../type/changeEvent.json" + }, + { + "$ref": "../failedEventResponse.json" + } + ] + } + }, + "timestamp": { + "description": "The timestamp when the event occurred, represented as a long.", + "type": "number", + "format": "int64" + } + }, + "required": ["status", "data", "timestamp"], + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/events/failedEventResponse.json b/openmetadata-spec/src/main/resources/json/schema/events/failedEventResponse.json new file mode 100644 index 000000000000..ad51d742fd99 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/events/failedEventResponse.json @@ -0,0 +1,31 @@ +{ + "$id": "https://open-metadata.org/schema/events/failedEventResponse.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "FailedEvents", + "description": "Failed Events Schema", + "type": "object", + "javaType": "org.openmetadata.schema.entity.events.FailedEventResponse", + "properties": { + 
"failingSubscriptionId": { + "description": "Unique identifier that identifies this Event Subscription.", + "$ref": "../type/basic.json#/definitions/uuid" + }, + "changeEvent": { + "description": "Change Event that failed", + "$ref": "../type/changeEvent.json" + }, + "reason": { + "description": "Reason for failure", + "type": "string" + }, + "source": { + "description": "Source of the failed event", + "type": "string" + }, + "timestamp": { + "description": "Time of Failure", + "$ref": "../type/basic.json#/definitions/timestamp" + } + }, + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-spec/src/main/resources/json/schema/settings/settings.json b/openmetadata-spec/src/main/resources/json/schema/settings/settings.json index a9e7f7e7f3c6..0811f5e8fc8f 100644 --- a/openmetadata-spec/src/main/resources/json/schema/settings/settings.json +++ b/openmetadata-spec/src/main/resources/json/schema/settings/settings.json @@ -30,7 +30,9 @@ "slackInstaller", "slackState", "profilerConfiguration", - "searchSettings" + "searchSettings", + "assetCertificationSettings", + "lineageSettings" ] } }, @@ -76,6 +78,12 @@ }, { "$ref": "../configuration/searchSettings.json" + }, + { + "$ref": "../configuration/assetCertificationSettings.json" + }, + { + "$ref": "../configuration/lineageSettings.json" } ] } diff --git a/openmetadata-spec/src/main/resources/json/schema/type/assetCertification.json b/openmetadata-spec/src/main/resources/json/schema/type/assetCertification.json new file mode 100644 index 000000000000..a35020503750 --- /dev/null +++ b/openmetadata-spec/src/main/resources/json/schema/type/assetCertification.json @@ -0,0 +1,22 @@ +{ "$id": "https://open-metadata.org/schema/type/assetCertification.json", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "AssetCertification", + "description": "Defines the Asset Certification schema.", + "javaType": "org.openmetadata.schema.type.AssetCertification", + "type": "object", + 
"properties": { + "tagLabel": { + "$ref": "./tagLabel.json" + }, + "appliedDate": { + "description": "The date when the certification was applied.", + "$ref": "basic.json#/definitions/timestamp" + }, + "expiryDate": { + "description": "The date when the certification expires.", + "$ref": "basic.json#/definitions/timestamp" + } + }, + "required": ["tagLabel", "appliedDate", "expiryDate"], + "additionalProperties": false +} \ No newline at end of file diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/GlossaryVersionPage.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/GlossaryVersionPage.spec.ts index 60e66e2d2a42..a5f16c6afc81 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/GlossaryVersionPage.spec.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/GlossaryVersionPage.spec.ts @@ -89,7 +89,7 @@ test('Glossary', async ({ page }) => { await page.reload(); const versionPageResponse = page.waitForResponse( - `/api/v1/glossaries/${glossary.responseData.id}/versions/0.2` + `/api/v1/glossaries/${glossary.responseData.id}/versions/0.3` ); await page.click('[data-testid="version-button"]'); await versionPageResponse; diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Tags.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Tags.spec.ts index 243497d80cae..c6fa884de535 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Tags.spec.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Tags.spec.ts @@ -173,7 +173,9 @@ test('Classification Page', async ({ page }) => { ) ).not.toBeVisible(); - await expect(page.getByTestId('saveAssociatedTag')).not.toBeVisible(); + await expect(page.getByText('No Tags are available')).toBeVisible(); + + await expect(page.getByTestId('saveAssociatedTag')).toBeDisabled(); // Re-enable the disabled Classification await classification.visitPage(page); @@ -207,6 +209,7 @@ test('Classification Page', 
async ({ page }) => { }); await test.step('Create classification with validation checks', async () => { + await classification.visitPage(page); await page.click('[data-testid="add-classification"]'); await page.waitForSelector('.ant-modal-content', { state: 'visible', diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Teams.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Teams.spec.ts index 23ef97fe4acc..a327a387d807 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Teams.spec.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Pages/Teams.spec.ts @@ -13,23 +13,26 @@ import test, { expect } from '@playwright/test'; import { GlobalSettingOptions } from '../../constant/settings'; import { EntityTypeEndpoint } from '../../support/entity/Entity.interface'; +import { TableClass } from '../../support/entity/TableClass'; import { TeamClass } from '../../support/team/TeamClass'; import { UserClass } from '../../support/user/UserClass'; import { - createNewPage, - descriptionBox, - getApiContext, - redirectToHomePage, - toastNotification, - uuid + createNewPage, + descriptionBox, + getApiContext, + redirectToHomePage, + toastNotification, + uuid, } from '../../utils/common'; import { addMultiOwner } from '../../utils/entity'; import { settingClick } from '../../utils/sidebar'; import { - createTeam, - hardDeleteTeam, - searchTeam, - softDeleteTeam + addTeamOwnerToEntity, + createTeam, + hardDeleteTeam, + searchTeam, + softDeleteTeam, + verifyAssetsInTeamsPage, } from '../../utils/team'; // use the admin user to login @@ -493,4 +496,70 @@ test.describe('Teams Page', () => { await team.delete(apiContext); } }); + + test('Team assets should', async ({ page }) => { + const { apiContext, afterAction } = await getApiContext(page); + const id = uuid(); + + const table1 = new TableClass(); + const table2 = new TableClass(); + const table3 = new TableClass(); + const table4 = new TableClass(); + + const team1 
= new TeamClass({ + name: `pw%percent-${id}`, + displayName: `pw team percent ${id}`, + description: 'playwright team with percent description', + teamType: 'Group', + }); + const team2 = new TeamClass({ + name: `pw&-${id}`, + displayName: `pw team ampersand ${id}`, + description: 'playwright team with ampersand description', + teamType: 'Group', + }); + const team3 = new TeamClass({ + name: `pw.team.dot-${id}`, + displayName: `pw.team.dot ${id}`, + description: 'playwright team with dot description', + teamType: 'Group', + }); + const team4 = new TeamClass({ + name: `pw team space-${id}`, + displayName: `pw team space ${id}`, + description: 'playwright team with space description', + teamType: 'Group', + }); + + await table1.create(apiContext); + await table2.create(apiContext); + await table3.create(apiContext); + await table4.create(apiContext); + await team1.create(apiContext); + await team2.create(apiContext); + await team3.create(apiContext); + await team4.create(apiContext); + + try { + await addTeamOwnerToEntity(page, table1, team1); + await addTeamOwnerToEntity(page, table2, team2); + await addTeamOwnerToEntity(page, table3, team3); + await addTeamOwnerToEntity(page, table4, team4); + + await verifyAssetsInTeamsPage(page, table1, team1, 1); + await verifyAssetsInTeamsPage(page, table2, team2, 1); + await verifyAssetsInTeamsPage(page, table3, team3, 1); + await verifyAssetsInTeamsPage(page, table4, team4, 1); + } finally { + await table1.delete(apiContext); + await table2.delete(apiContext); + await table3.delete(apiContext); + await table4.delete(apiContext); + await team1.delete(apiContext); + await team2.delete(apiContext); + await team3.delete(apiContext); + await team4.delete(apiContext); + await afterAction(); + } + }); }); diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ClassificationVersionPage.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ClassificationVersionPage.spec.ts index 
6397dbd8005a..e67283bd2dc2 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ClassificationVersionPage.spec.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ClassificationVersionPage.spec.ts @@ -80,12 +80,12 @@ test('Classification version page', async ({ page }) => { await patchClassificationResponse; // Verify disabled state - await page.click('[data-testid="version-button"]:has-text("0.2")'); + await page.click('[data-testid="version-button"]:has-text("0.3")'); await expect(page.locator('[data-testid="disabled"]')).toBeVisible(); // Toggle back to enabled - await page.click('[data-testid="version-button"]:has-text("0.2")'); + await page.click('[data-testid="version-button"]:has-text("0.3")'); await page.click('[data-testid="manage-button"]'); const patchClassificationResponse2 = page.waitForResponse( `/api/v1/classifications/${classification.responseData?.id}` @@ -94,7 +94,7 @@ test('Classification version page', async ({ page }) => { await patchClassificationResponse2; // Verify enabled state - await page.click('[data-testid="version-button"]:has-text("0.2")'); + await page.click('[data-testid="version-button"]:has-text("0.4")'); await expect( page.locator(`[data-testid="classification-${classification.data.name}"]`) diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ServiceEntityVersionPage.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ServiceEntityVersionPage.spec.ts index 895c59e86627..d083c18c1ecc 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ServiceEntityVersionPage.spec.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/VersionPages/ServiceEntityVersionPage.spec.ts @@ -153,7 +153,7 @@ entities.forEach((EntityClass) => { type: 'Users', }); - const versionDetailResponse = page.waitForResponse(`**/versions/0.2`); + const versionDetailResponse = page.waitForResponse(`**/versions/0.3`); await 
page.locator('[data-testid="version-button"]').click(); await versionDetailResponse; @@ -169,7 +169,7 @@ entities.forEach((EntityClass) => { await assignTier(page, 'Tier1', entity.endpoint); - const versionDetailResponse = page.waitForResponse(`**/versions/0.2`); + const versionDetailResponse = page.waitForResponse(`**/versions/0.4`); await page.locator('[data-testid="version-button"]').click(); await versionDetailResponse; @@ -210,7 +210,7 @@ entities.forEach((EntityClass) => { await expect(deletedBadge).toHaveText('Deleted'); - const versionDetailResponse = page.waitForResponse(`**/versions/0.3`); + const versionDetailResponse = page.waitForResponse(`**/versions/0.5`); await page.locator('[data-testid="version-button"]').click(); await versionDetailResponse; diff --git a/openmetadata-ui/src/main/resources/ui/playwright/utils/tag.ts b/openmetadata-ui/src/main/resources/ui/playwright/utils/tag.ts index ffcc3c253bf5..a3d13d81bed1 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/utils/tag.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/utils/tag.ts @@ -136,7 +136,7 @@ export const addTagToTableColumn = async ( await expect( page.locator( - `[data-testid="classification-tags-${columnNumber}"] [data-testid="tags-container"] [data-testid="icon"]` + `[data-testid="classification-tags-${columnNumber}"] [data-testid="tags-container"] [data-testid="tag-${tagFqn}"]` ) ).toBeVisible(); }; diff --git a/openmetadata-ui/src/main/resources/ui/playwright/utils/team.ts b/openmetadata-ui/src/main/resources/ui/playwright/utils/team.ts index 2de12ad1fe3e..382f5c2f0146 100644 --- a/openmetadata-ui/src/main/resources/ui/playwright/utils/team.ts +++ b/openmetadata-ui/src/main/resources/ui/playwright/utils/team.ts @@ -11,7 +11,10 @@ * limitations under the License. 
*/ import { APIRequestContext, expect, Page } from '@playwright/test'; +import { TableClass } from '../support/entity/TableClass'; +import { TeamClass } from '../support/team/TeamClass'; import { descriptionBox, toastNotification, uuid } from './common'; +import { addOwner } from './entity'; import { validateFormNameFieldInput } from './form'; const TEAM_TYPES = ['Department', 'Division', 'Group']; @@ -267,3 +270,49 @@ export const searchTeam = async ( await expect(page.locator('table')).toContainText(teamName); } }; + +export const addTeamOwnerToEntity = async ( + page: Page, + table: TableClass, + team: TeamClass +) => { + await table.visitEntityPage(page); + await addOwner({ + page, + owner: team.data.displayName, + type: 'Teams', + endpoint: table.endpoint, + dataTestId: 'data-assets-header', + }); +}; + +export const verifyAssetsInTeamsPage = async ( + page: Page, + table: TableClass, + team: TeamClass, + assetCount: number +) => { + const fullyQualifiedName = table.entityResponseData?.['fullyQualifiedName']; + await table.visitEntityPage(page); + + await expect( + page.getByTestId('data-assets-header').getByTestId('owner-link') + ).toContainText(team.data.displayName); + + await page + .getByTestId('data-assets-header') + .locator(`a:has-text("${team.data.displayName}")`) + .click(); + + const res = page.waitForResponse('/api/v1/search/query?*size=15'); + await page.getByTestId('assets').click(); + await res; + + await expect( + page.locator(`[data-testid="table-data-card_${fullyQualifiedName}"]`) + ).toBeVisible(); + + await expect( + page.getByTestId('assets').getByTestId('filter-count') + ).toContainText(assetCount.toString()); +}; diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md new file mode 100644 index 000000000000..87d741994f88 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md @@ -0,0 
+1,50 @@ +# Exasol + +In this section, we provide guides and references for using the Exasol connector. + +## Requirements + +* Exasol >= 7.1 + +## Connection Details + +$$section +### Connection Scheme $(id="scheme") + +SQLAlchemy driver scheme options. +$$ + +$$section +### Username $(id="username") + +Username to connect to Exasol. This user should have privileges to read all the metadata in Exasol. +$$ + +$$section +### Password $(id="password") + +Password of the user connecting to Exasol. +$$ + +$$section +### Host and Port $(id="hostPort") + +This parameter specifies the host and port of the Exasol instance. This should be specified as a string in the format `hostname:port`. For example, you might set the hostPort parameter to `localhost:8563`. + +If you are running the OpenMetadata ingestion in a docker and your services are hosted on the `localhost`, then use `host.docker.internal:8563` as the value. +$$ + +$$section +### SSL/TLS Settings $(id="tls") +Mode/setting for SSL validation: + +#### validate-certificate (**default**) +Uses Transport Layer Security (TLS) and validates the server certificate using system certificate stores. + +#### ignore-certificate +Uses Transport Layer Security (TLS) but disables the validation of the server certificate. This should not be used in production. It can be useful during testing with self-signed certificates. + +#### disable-tls +Does not use any Transport Layer Security (TLS). Data will be sent in plain text (no encryption). +While this may be helpful in rare cases of debugging, make sure you do not use this in production. 
+ diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/img/service-icon-exasol.png b/openmetadata-ui/src/main/resources/ui/src/assets/img/service-icon-exasol.png new file mode 100644 index 000000000000..5efdfb486e0c Binary files /dev/null and b/openmetadata-ui/src/main/resources/ui/src/assets/img/service-icon-exasol.png differ diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/customproperties/number.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/customproperties/number.svg index 715ebe96b0c9..7bf22c0aebc8 100644 --- a/openmetadata-ui/src/main/resources/ui/src/assets/svg/customproperties/number.svg +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/customproperties/number.svg @@ -10,4 +10,4 @@ - + \ No newline at end of file diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/array.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/array.svg new file mode 100644 index 000000000000..d95306000d8d --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/array.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/binary.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/binary.svg new file mode 100644 index 000000000000..27456948e39e --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/binary.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/bitmap.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/bitmap.svg new file mode 100644 index 000000000000..06b1f97a63db --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/bitmap.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/boolean.svg 
b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/boolean.svg new file mode 100644 index 000000000000..0f3e5a91c423 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/boolean.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/data-time-range.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/data-time-range.svg new file mode 100644 index 000000000000..00b6a4cc58dc --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/data-time-range.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/date.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/date.svg new file mode 100644 index 000000000000..a71374a869e2 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/date.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/decimal.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/decimal.svg new file mode 100644 index 000000000000..d5f97422442e --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/decimal.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/double.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/double.svg new file mode 100644 index 000000000000..fb555df2ee10 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/double.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/duration.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/duration.svg new file mode 100644 index 000000000000..54726eba4b5c --- /dev/null +++ 
b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/duration.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/enum.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/enum.svg new file mode 100644 index 000000000000..28711a474001 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/enum.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/error.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/error.svg new file mode 100644 index 000000000000..90ab9ba3d21f --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/error.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/geometry.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/geometry.svg new file mode 100644 index 000000000000..f79c0148b872 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/geometry.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/integer.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/integer.svg new file mode 100644 index 000000000000..c6249dbc02a8 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/integer.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ipv6.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ipv6.svg new file mode 100644 index 000000000000..dc2580a56995 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ipv6.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/json.svg 
b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/json.svg new file mode 100644 index 000000000000..49541c11b01e --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/json.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/map.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/map.svg new file mode 100644 index 000000000000..bcfb33384b6f --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/map.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/markdown.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/markdown.svg new file mode 100644 index 000000000000..382137c453a0 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/markdown.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/money.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/money.svg new file mode 100644 index 000000000000..17b1638a9b8c --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/money.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/null.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/null.svg new file mode 100644 index 000000000000..cd0be1909d28 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/null.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/numeric.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/numeric.svg new file mode 100644 index 000000000000..d71988a3808a --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/numeric.svg @@ -0,0 +1,13 @@ + + 
+ + + + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/polygon.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/polygon.svg new file mode 100644 index 000000000000..9d57727c2354 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/polygon.svg @@ -0,0 +1,4 @@ + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/record.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/record.svg new file mode 100644 index 000000000000..0a67f26c1441 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/record.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/string.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/string.svg new file mode 100644 index 000000000000..a8237a6a6e8f --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/string.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/struct.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/struct.svg new file mode 100644 index 000000000000..e1b29fd07f74 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/struct.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time-interval.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time-interval.svg new file mode 100644 index 000000000000..4c495af21134 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time-interval.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time.svg new file mode 100644 
index 000000000000..8fa140747efc --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/time.svg @@ -0,0 +1,4 @@ + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/timestamp.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/timestamp.svg new file mode 100644 index 000000000000..c5abb021db71 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/timestamp.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ts-query.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ts-query.svg new file mode 100644 index 000000000000..70ace324e0ed --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/ts-query.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/union.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/union.svg new file mode 100644 index 000000000000..124bfe6836ff --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/union.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/unknown.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/unknown.svg new file mode 100644 index 000000000000..c35ceb241701 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/unknown.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/varchar.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/varchar.svg new file mode 100644 index 000000000000..51462414841e --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/varchar.svg @@ -0,0 +1,4 @@ + + + + diff --git 
a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/variant.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/variant.svg new file mode 100644 index 000000000000..112edee0009f --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/variant.svg @@ -0,0 +1,3 @@ + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/xml.svg b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/xml.svg new file mode 100644 index 000000000000..90287895ccd2 --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/assets/svg/data-type-icon/xml.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/openmetadata-ui/src/main/resources/ui/src/components/Classifications/ClassificationDetails/ClassificationDetails.tsx b/openmetadata-ui/src/main/resources/ui/src/components/Classifications/ClassificationDetails/ClassificationDetails.tsx index 30ab4a064879..4f2b4b5ef434 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/Classifications/ClassificationDetails/ClassificationDetails.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/components/Classifications/ClassificationDetails/ClassificationDetails.tsx @@ -521,9 +521,10 @@ const ClassificationDetails = forwardRef( size="small" /> - {showPagination && !isTagsLoading && ( + {showPagination && ( { {showPagination && ( { { + {onShowSizeChange && ( ({ label: `${size} / Page`, diff --git a/openmetadata-ui/src/main/resources/ui/src/components/common/OwnerLabel/OwnerLabel.component.tsx b/openmetadata-ui/src/main/resources/ui/src/components/common/OwnerLabel/OwnerLabel.component.tsx index 6af04681b655..79bd7cc76676 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/common/OwnerLabel/OwnerLabel.component.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/components/common/OwnerLabel/OwnerLabel.component.tsx @@ -109,7 +109,9 @@ export const OwnerLabel = ({ key={owner.id} to={ owner.type === OwnerType.TEAM - ? 
getTeamAndUserDetailsPath(owner.name ?? '') + ? getTeamAndUserDetailsPath( + owner.fullyQualifiedName ?? '' + ) : getUserPath(owner.name ?? '') }> {ownerDisplayName?.[index] ?? displayName} diff --git a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.component.tsx b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.component.tsx index 8ed89f97084a..5738452b1078 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.component.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.component.tsx @@ -10,8 +10,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +import { Badge } from 'antd'; import { isNil } from 'lodash'; import React from 'react'; +import { useTranslation } from 'react-i18next'; import { getCountBadge } from '../../../utils/CommonUtils'; import './tabs-label.less'; import { TabsLabelProps } from './TabsLabel.interface'; @@ -22,7 +24,10 @@ const TabsLabel = ({ isActive, id, description, + isBeta, }: TabsLabelProps) => { + const { t } = useTranslation(); + return (
@@ -30,6 +35,9 @@ const TabsLabel = ({ {!isNil(count) && ( {getCountBadge(count, '', isActive)} )} + {isBeta && ( + + )}
{/* Note: add ".custom-menu-with-description" class in Menu component if need description in menu */} {description && ( diff --git a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.interface.ts b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.interface.ts index 7cc394600e1c..6d50498f314e 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.interface.ts +++ b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/TabsLabel.interface.ts @@ -26,4 +26,5 @@ export interface TabsLabelProps { count?: number; isActive?: boolean; description?: string; + isBeta?: boolean; } diff --git a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/tabs-label.less b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/tabs-label.less index 325d2fd221c7..6337edcab392 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/tabs-label.less +++ b/openmetadata-ui/src/main/resources/ui/src/components/common/TabsLabel/tabs-label.less @@ -20,4 +20,16 @@ color: @text-grey-muted; line-height: 12px; } + .service-beta-tag { + position: absolute; + top: 0px; + right: -20px; + + sup { + height: 16px; + display: flex; + justify-content: center; + align-items: center; + } + } } diff --git a/openmetadata-ui/src/main/resources/ui/src/constants/Services.constant.ts b/openmetadata-ui/src/main/resources/ui/src/constants/Services.constant.ts index 46f18c1a1b3d..3bd5bd0da8c4 100644 --- a/openmetadata-ui/src/main/resources/ui/src/constants/Services.constant.ts +++ b/openmetadata-ui/src/main/resources/ui/src/constants/Services.constant.ts @@ -36,6 +36,7 @@ import domo from '../assets/img/service-icon-domo.png'; import doris from '../assets/img/service-icon-doris.png'; import druid from '../assets/img/service-icon-druid.png'; import dynamodb from '../assets/img/service-icon-dynamodb.png'; +import exasol from 
'../assets/img/service-icon-exasol.png'; import fivetran from '../assets/img/service-icon-fivetran.png'; import flink from '../assets/img/service-icon-flink.png'; import gcs from '../assets/img/service-icon-gcs.png'; @@ -186,6 +187,7 @@ export const ALATIONSINK = alationsink; export const SAS = sas; export const OPENLINEAGE = openlineage; export const LOGO = logo; +export const EXASOL = exasol; export const AIRFLOW = airflow; export const PREFECT = prefect; diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/de-de.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/de-de.json index 2f391a4f2bbd..38b415eeb75d 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/de-de.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/de-de.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Verbindungszeitüberschreitungen", "connector": "Connector", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Consumer-aligned", "container": "Container", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Keine Beschreibung", "no-diff-available": "Keine Unterschiede verfügbar", "no-entity": "Keine {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "Keine passenden Datenanlagen gefunden", "no-of-test": " Nr. 
der Tests", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Verwandte Begriffe", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevanz", "remove": "Entfernen", "remove-entity": "{{entity}} entfernen", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/en-us.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/en-us.json index 076e46f8b47e..e99b74bb9984 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/en-us.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/en-us.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Connection Timeout", "connector": "Connector", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Consumer-aligned", "container": "Container", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "No description", "no-diff-available": "No diff available", "no-entity": "No {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "No matching data assets found", "no-of-test": " No. 
of Test", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Related Terms", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevance", "remove": "Remove", "remove-entity": "Remove {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/es-es.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/es-es.json index 40c32b751c1a..7f672a7e6825 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/es-es.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/es-es.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Tiempo de conexión expirado", "connector": "Conector", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Alineado con el Consumidor", "container": "Contenedor", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Sin descripción", "no-diff-available": "Sin diferencia disponible", "no-entity": "No hay {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "No se encontraron activos de datos coincidentes", "no-of-test": "No. 
de prueba", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Términos relacionados", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevancia", "remove": "Eliminar", "remove-entity": "Eliminar {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/fr-fr.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/fr-fr.json index b1931a772b0f..987f1b824174 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/fr-fr.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/fr-fr.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Délais d'Attente de Connexion", "connector": "Connecteur", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Consumer-aligned", "container": "Conteneur", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Aucune description", "no-diff-available": "Aucune différence disponible", "no-entity": "Pas de {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "Aucun actif de données trouvé", "no-of-test": " No. 
de Tests", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Termes Liés", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Pertinence", "remove": "Retirer", "remove-entity": "Retirer un·e {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/gl-es.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/gl-es.json index 7929c9432ffa..e3c5b2d155fc 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/gl-es.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/gl-es.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Tempos de espera da conexión", "connector": "Conector", "constraint": "Restrición", + "constraint-plural": "Constraints", "consumer-aligned": "Aliñado co consumidor", "container": "Contedor", "container-column": "Columna do contedor", @@ -955,6 +956,7 @@ "related-metric-plural": "Métricas relacionadas", "related-term-plural": "Termos relacionados", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevancia", "remove": "Eliminar", "remove-entity": "Eliminar {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/he-he.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/he-he.json index 03ed3ce596e0..d83c1b11b242 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/he-he.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/he-he.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "זמן קציבת החיבור", "connector": "מחבר", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "מכוון לצרכן", "container": "אחסון", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "-", "no-diff-available": "אין הבדל זמין", "no-entity": "אין {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} 
Selected", "no-matching-data-asset": "לא נמצאו נכסי נתונים תואמים", "no-of-test": "מספר הבדיקות", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "מונחים קשורים", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "רלוונטיות", "remove": "הסר", "remove-entity": "הסר {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/ja-jp.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/ja-jp.json index a12fa061b7d9..be07eaa23682 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/ja-jp.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/ja-jp.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "接続のタイムアウト", "connector": "コネクタ", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Consumer-aligned", "container": "コンテナ", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "説明がありません", "no-diff-available": "差分を見ることはできません", "no-entity": "No {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "マッチするデータアセットはありません", "no-of-test": " テスト番号", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "関連する用語", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevance", "remove": "除外", "remove-entity": "{{entity}}を除外", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/nl-nl.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/nl-nl.json index 034a65bb43e7..c8be1deca19d 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/nl-nl.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/nl-nl.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Verbindingstime-out", "connector": "Connector", "constraint": "Constraint", + "constraint-plural": 
"Constraints", "consumer-aligned": "Consumentafgestemd", "container": "Container", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Geen beschrijving", "no-diff-available": "Geen verschil beschikbaar", "no-entity": "Geen {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "Geen overeenkomende data-assets gevonden", "no-of-test": " Aantal tests", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Gerelateerde termen", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevantie", "remove": "Verwijderen", "remove-entity": "Verwijder {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/pr-pr.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/pr-pr.json index 8700b871703c..630205917e26 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/pr-pr.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/pr-pr.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "اتمام زمان‌های اتصال", "connector": "اتصال‌دهنده", "constraint": "محدودیت", + "constraint-plural": "Constraints", "consumer-aligned": "هماهنگ با مصرف‌کننده", "container": "ظرف", "container-column": "ستون ظرف", @@ -772,6 +773,7 @@ "no-description": "بدون توضیح", "no-diff-available": "هیچ تفاوتی موجود نیست", "no-entity": "هیچ {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "هیچ {{entity}} انتخاب نشده", "no-matching-data-asset": "هیچ دارایی داده‌ی منطبق یافت نشد", "no-of-test": "تعداد تست", @@ -955,6 +957,7 @@ "related-metric-plural": "شاخص‌های مرتبط", "related-term-plural": "اصطلاحات مرتبط", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "مربوط بودن", "remove": "حذف", "remove-entity": "حذف {{entity}}", diff --git 
a/openmetadata-ui/src/main/resources/ui/src/locale/languages/pt-br.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/pt-br.json index dfcd92db0f58..34f500461529 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/pt-br.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/pt-br.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Tempo Limite da Conexão", "connector": "Conector", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Alinhado ao Consumidor", "container": "Contêiner", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Sem descrição", "no-diff-available": "Nenhuma diferença disponível", "no-entity": "Nenhum(a) {{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "Nenhum ativo de dados correspondente encontrado", "no-of-test": " Nº de Teste", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Termos Relacionados", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Relevância", "remove": "Remover", "remove-entity": "Remover {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/ru-ru.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/ru-ru.json index 781dd3f4cecd..e0d61842979c 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/ru-ru.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/ru-ru.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "Время соединения вышло", "connector": "Коннектор", "constraint": "Constraint", + "constraint-plural": "Constraints", "consumer-aligned": "Consumer-aligned", "container": "Контейнер", "container-column": "Container Column", @@ -772,6 +773,7 @@ "no-description": "Нет описания", "no-diff-available": "Нет различий", "no-entity": "{{entity}} отсутствует", + 
"no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "Подходящие объекты данных не найдены", "no-of-test": "№ теста", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "Связанные термины", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "Актуальность", "remove": "Удалить", "remove-entity": "Удалить {{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/locale/languages/zh-cn.json b/openmetadata-ui/src/main/resources/ui/src/locale/languages/zh-cn.json index b364fe5b4a55..9e9f6fc0b64f 100644 --- a/openmetadata-ui/src/main/resources/ui/src/locale/languages/zh-cn.json +++ b/openmetadata-ui/src/main/resources/ui/src/locale/languages/zh-cn.json @@ -210,6 +210,7 @@ "connection-timeout-plural": "连接超时", "connector": "连接器", "constraint": "约束", + "constraint-plural": "Constraints", "consumer-aligned": "使用者对齐", "container": "存储容器", "container-column": "存储容器列", @@ -772,6 +773,7 @@ "no-description": "无描述", "no-diff-available": "没有可用的差异", "no-entity": "没有{{entity}}", + "no-entity-available": "No {{entity}} are available", "no-entity-selected": "No {{entity}} Selected", "no-matching-data-asset": "未找到匹配的数据资产", "no-of-test": "测试数量", @@ -955,6 +957,7 @@ "related-metric-plural": "Related Metrics", "related-term-plural": "关联术语", "relationship": "Relationship", + "relationship-type": "Relationship Type", "relevance": "相关性", "remove": "删除", "remove-entity": "删除{{entity}}", diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/APICollectionPage/APIEndpointsTab.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/APICollectionPage/APIEndpointsTab.tsx index af02fca7bb3d..67914237e0b5 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/APICollectionPage/APIEndpointsTab.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/APICollectionPage/APIEndpointsTab.tsx @@ -185,6 +185,7 @@ function APIEndpointsTab({
{ {showPagination && ( { {showPagination && ( { { @@ -23,6 +24,10 @@ jest.mock('../../components/IncidentManager/IncidentManager.component', () => { .mockImplementation(() =>
IncidentManager.component
); }); +jest.mock('./IncidentManagerClassBase', () => ({ + getIncidentWidgets: jest.fn(), +})); + describe('IncidentManagerPage', () => { it('should render component', async () => { render(); @@ -34,4 +39,25 @@ describe('IncidentManagerPage', () => { await screen.findByText('IncidentManager.component') ).toBeInTheDocument(); }); + + it('should render WidgetComponent when getIncidentWidgets returns a component', async () => { + const MockWidgetComponent = () =>
Mock Widget
; + (incidentManagerClassBase.getIncidentWidgets as jest.Mock).mockReturnValue( + MockWidgetComponent + ); + + render(); + + expect(await screen.findByText('Mock Widget')).toBeInTheDocument(); + }); + + it('should not render WidgetComponent when getIncidentWidgets returns null', async () => { + (incidentManagerClassBase.getIncidentWidgets as jest.Mock).mockReturnValue( + null + ); + + render(); + + expect(screen.queryByText('Mock Widget')).not.toBeInTheDocument(); + }); }); diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/IncidentManager/IncidentManagerPage.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/IncidentManager/IncidentManagerPage.tsx index d340f727f62f..90f452f9d938 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/IncidentManager/IncidentManagerPage.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/IncidentManager/IncidentManagerPage.tsx @@ -11,12 +11,18 @@ * limitations under the License. */ import { Col, Row, Typography } from 'antd'; -import React from 'react'; +import React, { useMemo } from 'react'; import IncidentManager from '../../components/IncidentManager/IncidentManager.component'; import PageLayoutV1 from '../../components/PageLayoutV1/PageLayoutV1'; import { PAGE_HEADERS } from '../../constants/PageHeaders.constant'; +import incidentManagerClassBase from './IncidentManagerClassBase'; const IncidentManagerPage = () => { + const WidgetComponent = useMemo( + () => incidentManagerClassBase.getIncidentWidgets(), + [] + ); + return ( @@ -34,6 +40,12 @@ const IncidentManagerPage = () => { + {WidgetComponent && ( +
+ + + )} + diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/MarketPlacePage/MarketPlacePage.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/MarketPlacePage/MarketPlacePage.tsx index dda41fec3450..7c46ead4c5e4 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/MarketPlacePage/MarketPlacePage.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/MarketPlacePage/MarketPlacePage.tsx @@ -153,6 +153,7 @@ const MarketPlacePage = () => { {showPagination && ( { {showPagination && ( { {showPagination && ( 0} pageSize={pageSize} paging={paging} pagingHandler={onPageChange} diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/ObservabilityAlertsPage/ObservabilityAlertsPage.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/ObservabilityAlertsPage/ObservabilityAlertsPage.tsx index 8bfb0e9a8fe8..a50abccabe01 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/ObservabilityAlertsPage/ObservabilityAlertsPage.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/ObservabilityAlertsPage/ObservabilityAlertsPage.tsx @@ -242,6 +242,7 @@ const ObservabilityAlertsPage = () => { {showPagination && ( { { {showPagination && ( { {showPagination && ( { {showPagination && ( { - const filteredConstraints = !isEmpty(constraint) - ? constraint - ?.filter((item) => item.constraintType !== ConstraintType.PrimaryKey) - .map((item) => ({ - columns: item.columns?.[0], - relationshipType: item.relationshipType, - referredColumns: item.referredColumns?.[0], - })) + const constraintsWithoutPrimaryKeyData = constraint?.filter( + (item) => item.constraintType !== ConstraintType.PrimaryKey + ); + const filteredConstraints = !isEmpty(constraintsWithoutPrimaryKeyData) + ? 
constraintsWithoutPrimaryKeyData?.map((item) => ({ + columns: item.columns?.[0], + relationshipType: item.relationshipType, + referredColumns: item.referredColumns?.[0], + })) : [ { - columns: '', - relationshipType: '', - referredColumns: '', + columns: undefined, + relationshipType: undefined, + referredColumns: undefined, }, ]; @@ -202,6 +204,11 @@ const TableConstraintsModal = ({ )} diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableConstraints/TableConstraintsModal/table-constraint.style.less b/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableConstraints/TableConstraintsModal/table-constraint.style.less index 46ece59b3cc3..efb79e21a47b 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableConstraints/TableConstraintsModal/table-constraint.style.less +++ b/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableConstraints/TableConstraintsModal/table-constraint.style.less @@ -11,7 +11,7 @@ * limitations under the License. */ -@import url('../../../../styles/variables.less'); +@import (reference) url('../../../../styles/variables.less'); .table-constraint-form-container { margin-bottom: 20px; diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableDetailsPageV1.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableDetailsPageV1.tsx index 2d51be9c4d78..b3bc17bc1092 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableDetailsPageV1.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/TableDetailsPageV1/TableDetailsPageV1.tsx @@ -981,7 +981,7 @@ const TableDetailsPageV1: React.FC = () => { ? activeTabForTourDatasetPage : activeTab ?? 
EntityTabs.SCHEMA } - className="table-details-page-tabs" + className="table-details-page-tabs entity-details-page-tabs" data-testid="tabs" items={tabs} onChange={handleTabChange} diff --git a/openmetadata-ui/src/main/resources/ui/src/pages/UserListPage/UserListPageV1.tsx b/openmetadata-ui/src/main/resources/ui/src/pages/UserListPage/UserListPageV1.tsx index 52062d615ee7..1833734de7d5 100644 --- a/openmetadata-ui/src/main/resources/ui/src/pages/UserListPage/UserListPageV1.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/pages/UserListPage/UserListPageV1.tsx @@ -483,6 +483,7 @@ const UserListPageV1 = () => { {showPagination && ( SelectFieldSettings['asyncFetch'] = ({ searchIndex, entityField }) => { - return (search) => { - return getAggregateFieldOptions( + // Wrapping the fetch function in a debounce of 300 ms + const debouncedFetch = debounce((search, callback) => { + getAggregateFieldOptions( searchIndex, entityField, search ?? '', @@ -125,13 +126,19 @@ class AdvancedSearchClassBase { const buckets = response.data.aggregations[`sterms#${entityField}`].buckets; - return { + callback({ values: buckets.map((bucket) => ({ value: bucket.key, title: bucket.label ?? 
bucket.key, })), hasMore: false, - }; + }); + }); + }, 300); + + return (search) => { + return new Promise((resolve) => { + debouncedFetch(search, resolve); }); }; }; diff --git a/openmetadata-ui/src/main/resources/ui/src/utils/DatabaseServiceUtils.ts b/openmetadata-ui/src/main/resources/ui/src/utils/DatabaseServiceUtils.ts index 1e33c8649a51..912818d909ad 100644 --- a/openmetadata-ui/src/main/resources/ui/src/utils/DatabaseServiceUtils.ts +++ b/openmetadata-ui/src/main/resources/ui/src/utils/DatabaseServiceUtils.ts @@ -29,6 +29,7 @@ import domoDatabaseConnection from '../jsons/connectionSchemas/connections/datab import dorisConnection from '../jsons/connectionSchemas/connections/database/dorisConnection.json'; import druidConnection from '../jsons/connectionSchemas/connections/database/druidConnection.json'; import dynamoDBConnection from '../jsons/connectionSchemas/connections/database/dynamoDBConnection.json'; +import exasolConnection from '../jsons/connectionSchemas/connections/database/exasolConnection.json'; import glueConnection from '../jsons/connectionSchemas/connections/database/glueConnection.json'; import greenplumConnection from '../jsons/connectionSchemas/connections/database/greenplumConnection.json'; import hiveConnection from '../jsons/connectionSchemas/connections/database/hiveConnection.json'; @@ -121,6 +122,11 @@ export const getDatabaseConfig = (type: DatabaseServiceType) => { break; } + case DatabaseServiceType.Exasol: { + schema = exasolConnection; + + break; + } case DatabaseServiceType.Glue: { schema = glueConnection; diff --git a/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts b/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts index 80abeac24c3c..adcab3a38e08 100644 --- a/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts +++ b/openmetadata-ui/src/main/resources/ui/src/utils/ServiceUtilClassBase.ts @@ -41,6 +41,7 @@ import { DRUID, DYNAMODB, ELASTIC_SEARCH, + EXASOL, 
FIVETRAN, FLINK, GCS, @@ -311,6 +312,9 @@ class ServiceUtilClassBase { case this.DatabaseServiceTypeSmallCase.DynamoDB: return DYNAMODB; + case this.DatabaseServiceTypeSmallCase.Exasol: + return EXASOL; + case this.DatabaseServiceTypeSmallCase.SingleStore: return SINGLESTORE; diff --git a/openmetadata-ui/src/main/resources/ui/src/utils/TableUtils.tsx b/openmetadata-ui/src/main/resources/ui/src/utils/TableUtils.tsx index 6d02bb20e3af..6e1121032e11 100644 --- a/openmetadata-ui/src/main/resources/ui/src/utils/TableUtils.tsx +++ b/openmetadata-ui/src/main/resources/ui/src/utils/TableUtils.tsx @@ -120,6 +120,37 @@ import { ordinalize } from './StringsUtils'; import { TableDetailPageTabProps } from './TableClassBase'; import { TableFieldsInfoCommonEntities } from './TableUtils.interface'; +import { ReactComponent as IconArray } from '../assets/svg/data-type-icon/array.svg'; +import { ReactComponent as IconBinary } from '../assets/svg/data-type-icon/binary.svg'; +import { ReactComponent as IconBitmap } from '../assets/svg/data-type-icon/bitmap.svg'; +import { ReactComponent as IconBoolean } from '../assets/svg/data-type-icon/boolean.svg'; +import { ReactComponent as IconDateTime } from '../assets/svg/data-type-icon/data-time-range.svg'; +import { ReactComponent as IconDate } from '../assets/svg/data-type-icon/date.svg'; +import { ReactComponent as IconDecimal } from '../assets/svg/data-type-icon/decimal.svg'; +import { ReactComponent as IconDouble } from '../assets/svg/data-type-icon/double.svg'; +import { ReactComponent as IconEnum } from '../assets/svg/data-type-icon/enum.svg'; +import { ReactComponent as IconError } from '../assets/svg/data-type-icon/error.svg'; +import { ReactComponent as IconGeometry } from '../assets/svg/data-type-icon/geometry.svg'; +import { ReactComponent as IconInteger } from '../assets/svg/data-type-icon/integer.svg'; +import { ReactComponent as IconIpVersion } from '../assets/svg/data-type-icon/ipv6.svg'; +import { ReactComponent as IconJson } 
from '../assets/svg/data-type-icon/json.svg'; +import { ReactComponent as IconMap } from '../assets/svg/data-type-icon/map.svg'; +import { ReactComponent as IconMoney } from '../assets/svg/data-type-icon/money.svg'; +import { ReactComponent as IconNull } from '../assets/svg/data-type-icon/null.svg'; +import { ReactComponent as IconNumeric } from '../assets/svg/data-type-icon/numeric.svg'; +import { ReactComponent as IconPolygon } from '../assets/svg/data-type-icon/polygon.svg'; +import { ReactComponent as IconRecord } from '../assets/svg/data-type-icon/record.svg'; +import { ReactComponent as IconString } from '../assets/svg/data-type-icon/string.svg'; +import { ReactComponent as IconStruct } from '../assets/svg/data-type-icon/struct.svg'; +import { ReactComponent as IconTime } from '../assets/svg/data-type-icon/time.svg'; +import { ReactComponent as IconTimestamp } from '../assets/svg/data-type-icon/timestamp.svg'; +import { ReactComponent as IconTsQuery } from '../assets/svg/data-type-icon/ts-query.svg'; +import { ReactComponent as IconUnion } from '../assets/svg/data-type-icon/union.svg'; +import { ReactComponent as IconUnknown } from '../assets/svg/data-type-icon/unknown.svg'; +import { ReactComponent as IconVarchar } from '../assets/svg/data-type-icon/varchar.svg'; +import { ReactComponent as IconVariant } from '../assets/svg/data-type-icon/variant.svg'; +import { ReactComponent as IconXML } from '../assets/svg/data-type-icon/xml.svg'; + export const getUsagePercentile = (pctRank: number, isLiteral = false) => { const percentile = Math.round(pctRank * 10) / 10; const ordinalPercentile = ordinalize(percentile); @@ -213,6 +244,70 @@ export const getConstraintIcon = ({ ); }; +export const getColumnDataTypeIcon = ({ + dataType, + width = '16px', +}: { + dataType: DataType; + width?: string; +}) => { + const dataTypeIcons = { + [DataType.Array]: IconArray, + [DataType.Bit]: IconBinary, + [DataType.Binary]: IconBinary, + [DataType.Bitmap]: IconBitmap, + 
[DataType.Image]: IconBitmap, + [DataType.Boolean]: IconBoolean, + [DataType.Date]: IconDate, + [DataType.Year]: IconDate, + [DataType.Datetime]: IconDateTime, + [DataType.Datetimerange]: IconDateTime, + [DataType.Double]: IconDouble, + [DataType.Float]: IconDouble, + [DataType.Number]: IconDouble, + [DataType.Decimal]: IconDecimal, + [DataType.Enum]: IconEnum, + [DataType.Error]: IconError, + [DataType.Map]: IconMap, + [DataType.Geography]: IconMap, + [DataType.Geometry]: IconGeometry, + [DataType.Ipv4]: IconIpVersion, + [DataType.Ipv6]: IconIpVersion, + [DataType.JSON]: IconJson, + [DataType.Numeric]: IconNumeric, + [DataType.Long]: IconNumeric, + [DataType.Money]: IconMoney, + [DataType.Char]: IconVarchar, + [DataType.Text]: IconVarchar, + [DataType.Ntext]: IconVarchar, + [DataType.Mediumtext]: IconVarchar, + [DataType.Varchar]: IconVarchar, + [DataType.Int]: IconInteger, + [DataType.Bigint]: IconInteger, + [DataType.Largeint]: IconInteger, + [DataType.Smallint]: IconInteger, + [DataType.Tinyint]: IconInteger, + [DataType.Polygon]: IconPolygon, + [DataType.Null]: IconNull, + [DataType.Record]: IconRecord, + [DataType.Table]: IconRecord, + [DataType.String]: IconString, + [DataType.Struct]: IconStruct, + [DataType.Time]: IconTime, + [DataType.Timestamp]: IconTimestamp, + [DataType.Timestampz]: IconTimestamp, + [DataType.Tsquery]: IconTsQuery, + [DataType.Union]: IconUnion, + [DataType.Unknown]: IconUnknown, + [DataType.Variant]: IconVariant, + [DataType.XML]: IconXML, + }; + + const icon = dataTypeIcons[dataType as keyof typeof dataTypeIcons] || null; + + return ; +}; + export const getEntityIcon = ( indexType: string, iconClass = '', diff --git a/pom.xml b/pom.xml index c70d73924f4c..90b35cad718c 100644 --- a/pom.xml +++ b/pom.xml @@ -89,7 +89,7 @@ 3.3.0 3.1.2 3.6.0 - 3.3.0 + 3.3.1 3.0.2 3.0.2 2.15.0 @@ -157,6 +157,7 @@ 2.9.0 1.14.4 +