From 81f055df7531d999486046895256562c41194b64 Mon Sep 17 00:00:00 2001
From: Dragomir Penev
Date: Wed, 18 Jun 2025 13:40:10 +0300
Subject: [PATCH 01/37] More aggressive idle checks

---
 tests/integration/ha_tests/test_self_healing_1.py | 12 ++++++++++--
 tests/integration/ha_tests/test_self_healing_2.py | 12 ++++++++++--
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/tests/integration/ha_tests/test_self_healing_1.py b/tests/integration/ha_tests/test_self_healing_1.py
index f972303380..56c7ed1583 100644
--- a/tests/integration/ha_tests/test_self_healing_1.py
+++ b/tests/integration/ha_tests/test_self_healing_1.py
@@ -12,6 +12,7 @@
 from ..helpers import (
     APPLICATION_NAME,
     CHARM_BASE,
+    DATABASE_APP_NAME,
     METADATA,
     app_name,
     build_and_deploy,
@@ -71,8 +72,15 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     )
 
     if wait_for_apps:
-        async with ops_test.fast_forward():
-            await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False)
+        await ops_test.model.wait_for_idle(
+            apps=[
+                APPLICATION_NAME,
+                DATABASE_APP_NAME,
+            ],
+            status="active",
+            timeout=1000,
+            idle_period=30,
+        )
 
 
 @pytest.mark.abort_on_fail
diff --git a/tests/integration/ha_tests/test_self_healing_2.py b/tests/integration/ha_tests/test_self_healing_2.py
index 43b7d6a062..028fa3844c 100644
--- a/tests/integration/ha_tests/test_self_healing_2.py
+++ b/tests/integration/ha_tests/test_self_healing_2.py
@@ -8,6 +8,7 @@
 from ..helpers import (
     APPLICATION_NAME,
     CHARM_BASE,
+    DATABASE_APP_NAME,
     METADATA,
     app_name,
     build_and_deploy,
@@ -52,8 +53,15 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
     )
 
     if wait_for_apps:
-        async with ops_test.fast_forward():
-            await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False)
+        await ops_test.model.wait_for_idle(
+            apps=[
+                APPLICATION_NAME,
+                DATABASE_APP_NAME,
+            ],
+            status="active",
+            timeout=1000,
+            idle_period=30,
+        )
 
 
 @pytest.mark.abort_on_fail

From 9456b49b67c4fccb5d89cd82eea67294bd0f6217 Mon Sep 17 00:00:00 2001
From: Dragomir Penev
Date: Wed, 18 Jun 2025 16:48:01 +0300
Subject: [PATCH 02/37] Explicit idle

---
 tests/integration/ha_tests/helpers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py
index a719564c04..7313c5fc58 100644
--- a/tests/integration/ha_tests/helpers.py
+++ b/tests/integration/ha_tests/helpers.py
@@ -919,7 +919,7 @@ async def start_continuous_writes(ops_test: OpsTest, app: str, model: Model = No
     ]
     if not relations:
         await model.relate(app, f"{APPLICATION_NAME}:database")
-        await model.wait_for_idle(status="active", timeout=1000)
+        await model.wait_for_idle(apps=[APPLICATION_NAME, app], status="active", timeout=1000)
     else:
         action = (
             await model.applications[APPLICATION_NAME]

From 09ca1e55bd3a47116d2d3908723fb7c9b28f47e9 Mon Sep 17 00:00:00 2001
From: Dragomir Penev
Date: Wed, 18 Jun 2025 23:38:53 +0300
Subject: [PATCH 03/37] Idle period when relating to the test app

---
 tests/integration/ha_tests/helpers.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py
index 7313c5fc58..07aabf2324 100644
--- a/tests/integration/ha_tests/helpers.py
+++ b/tests/integration/ha_tests/helpers.py
@@ -919,7 +919,9 @@ async def start_continuous_writes(ops_test: OpsTest, app: str, model: Model = No
     ]
     if not relations:
         await model.relate(app, f"{APPLICATION_NAME}:database")
-        await
model.wait_for_idle(apps=[APPLICATION_NAME, app], status="active", timeout=1000) + await model.wait_for_idle( + apps=[APPLICATION_NAME, app], status="active", timeout=1000, idle_period=30 + ) else: action = ( await model.applications[APPLICATION_NAME] From 1fa040ef339d00273b4d2b61f8bcf6545503d917 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 13:56:14 +0300 Subject: [PATCH 04/37] Remove second start --- tests/integration/ha_tests/helpers.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index 07aabf2324..ae329e977f 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -922,13 +922,6 @@ async def start_continuous_writes(ops_test: OpsTest, app: str, model: Model = No await model.wait_for_idle( apps=[APPLICATION_NAME, app], status="active", timeout=1000, idle_period=30 ) - else: - action = ( - await model.applications[APPLICATION_NAME] - .units[0] - .run_action("start-continuous-writes") - ) - await action.wait() for attempt in Retrying(stop=stop_after_delay(60 * 5), wait=wait_fixed(3), reraise=True): with attempt: action = ( From daff032f1792dfcac1bd3da7b6c7d808cbd8da92 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 15:10:36 +0300 Subject: [PATCH 05/37] Remove log warning --- src/charm.py | 19 ++++++------------- tests/unit/conftest.py | 2 +- tests/unit/test_charm.py | 4 ++-- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/charm.py b/src/charm.py index fe0f976441..aad831c271 100755 --- a/src/charm.py +++ b/src/charm.py @@ -59,7 +59,7 @@ from lightkube.models.core_v1 import ServicePort, ServiceSpec from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import Endpoints, Node, Pod, Service -from ops import JujuVersion, main +from ops import main from ops.charm import ( ActionEvent, HookEvent, @@ -217,8 +217,9 @@ def __init__(self, *args): self._context = {"namespace": self._namespace, "app_name": self._name} self.cluster_name = f"patroni-{self._name}" - juju_version = JujuVersion.from_environ() - run_cmd = "/usr/bin/juju-exec" if juju_version.major > 2 else "/usr/bin/juju-run" + run_cmd = ( + "/usr/bin/juju-exec" if self.model.juju_version.major > 2 else "/usr/bin/juju-run" + ) self._observer = AuthorisationRulesObserver(self, run_cmd) self.framework.observe( self.on.authorisation_rules_change, self._on_authorisation_rules_change @@ -271,7 +272,7 @@ def __init__(self, *args): relation_name="logging", ) - if JujuVersion.from_environ().supports_open_port_on_k8s: + if self.model.juju_version.supports_open_port_on_k8s: try: self.unit.set_ports(5432, 8008) except ModelError: @@ -292,14 +293,6 @@ def tracing_endpoint(self) -> str | None: if self.tracing.is_ready(): return self.tracing.get_endpoint(TRACING_PROTOCOL) - @property - def _pebble_log_forwarding_supported(self) -> bool: - # https://github.com/canonical/operator/issues/1230 - from ops.jujuversion import JujuVersion - - juju_version = JujuVersion.from_environ() - return juju_version > JujuVersion(version="3.3") - def _generate_metrics_jobs(self, enable_tls: bool) -> dict: """Generate spec for Prometheus scraping.""" return [ @@ -367,7 +360,7 @@ def peer_relation_data(self, scope: Scopes) -> DataPeerData: def _translate_field_to_secret_key(self, key: str) -> str: """Change 'key' to secrets-compatible key field.""" - if not JujuVersion.from_environ().has_secrets: + if not self.model.juju_version.has_secrets: return key key = 
SECRET_KEY_OVERRIDES.get(key, key) new_key = key.replace("_", "-") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index eb0faf7410..ec3b066d99 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -10,7 +10,7 @@ # charm.JujuVersion.has_secrets set as True or as False @pytest.fixture(params=[True, False], autouse=True) def juju_has_secrets(request, monkeypatch): - monkeypatch.setattr("charm.JujuVersion.has_secrets", PropertyMock(return_value=request.param)) + monkeypatch.setattr("ops.JujuVersion.has_secrets", PropertyMock(return_value=request.param)) return request.param diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 28ec888eea..a624dab832 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -54,10 +54,10 @@ def harness(): def test_set_ports(only_with_juju_secrets): with ( - patch("charm.JujuVersion") as _juju_version, + patch("ops.model.Model.juju_version", new_callable=PropertyMock) as _juju_version, patch("charm.PostgresqlOperatorCharm.unit") as _unit, ): - _juju_version.from_environ.return_value.major = 3 + _juju_version.return_value.major = 3 harness = Harness(PostgresqlOperatorCharm) harness.begin() _unit.set_ports.assert_called_once_with(5432, 8008) From 8b0e099010c0a7373df40e56382fd2d020cfbf42 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 16:20:24 +0300 Subject: [PATCH 06/37] Hold create db hook for longer --- src/relations/postgresql_provider.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index e2fb4f5c92..833ae4ea7f 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -166,13 +166,14 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: # Try to wait for pg_hba trigger try: - for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(1)): + for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(1)): with attempt: if not self.charm.postgresql.is_user_in_hba(user): raise Exception("pg_hba not ready") self.charm.unit_peer_data.update({ "pg_hba_needs_update_timestamp": str(datetime.now()) }) + self.charm.update_config() except RetryError: logger.warning("database requested: Unable to check pg_hba rule update") From efde10d3af491315e200eaf61b0bbcaecdff7b3b Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 18:40:54 +0300 Subject: [PATCH 07/37] Bump the pg_hba checker timeout --- src/relations/postgresql_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 833ae4ea7f..c7efb094d0 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -166,7 +166,7 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: # Try to wait for pg_hba trigger try: - for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(1)): + for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(2)): with attempt: if not self.charm.postgresql.is_user_in_hba(user): raise Exception("pg_hba not ready") From f6de4ae4994622ca3c75412b90c8ba8a6634dd5c Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 19:13:01 +0300 Subject: [PATCH 08/37] Don't update config --- src/relations/postgresql_provider.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 
c7efb094d0..bb54484a25 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -173,7 +173,6 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: self.charm.unit_peer_data.update({ "pg_hba_needs_update_timestamp": str(datetime.now()) }) - self.charm.update_config() except RetryError: logger.warning("database requested: Unable to check pg_hba rule update") From aaada846c0d4c273febdb06ce9ed2ad657cbce93 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 19 Jun 2025 20:24:27 +0300 Subject: [PATCH 09/37] Bump timeout --- src/relations/postgresql_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index bb54484a25..6e09a467b6 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -166,7 +166,7 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: # Try to wait for pg_hba trigger try: - for attempt in Retrying(stop=stop_after_attempt(5), wait=wait_fixed(2)): + for attempt in Retrying(stop=stop_after_attempt(15), wait=wait_fixed(1)): with attempt: if not self.charm.postgresql.is_user_in_hba(user): raise Exception("pg_hba not ready") From 5fb6e65ea9be7c5e924ebd94207e7721ea4be2f9 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 20 Jun 2025 17:47:04 +0300 Subject: [PATCH 10/37] Try to just append to pg_hba --- .github/workflows/lib-check.yaml | 4 +++- lib/charms/postgresql_k8s/v0/postgresql.py | 12 ++++++++++++ src/relations/postgresql_provider.py | 17 ++++++++--------- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/.github/workflows/lib-check.yaml b/.github/workflows/lib-check.yaml index e2816f83f8..f1d1f6c2cc 100644 --- a/.github/workflows/lib-check.yaml +++ b/.github/workflows/lib-check.yaml @@ -32,4 +32,6 @@ jobs: with: credentials: "${{ secrets.CHARMHUB_TOKEN }}" github-token: "${{ secrets.GITHUB_TOKEN }}" - + permissions: + # Add label to prs + pull-requests: write diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 54453a3623..ce75d3ba9d 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -1099,3 +1099,15 @@ def is_user_in_hba(self, username: str) -> bool: finally: if connection: connection.close() + + def reload_config(self) -> None: + """Reload postgresql's configuration on the local unit.""" + connection = None + try: + connection = self._connect_to_database(database_host=self.current_host) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.execute("SELECT pg_reload_conf();") + finally: + if connection is not None: + connection.close() diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 6e09a467b6..10b40782fe 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -22,11 +22,11 @@ from ops.charm import CharmBase, RelationBrokenEvent, RelationChangedEvent, RelationDepartedEvent from ops.framework import Object from ops.model import ActiveStatus, BlockedStatus, Relation -from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed from constants import ( DATABASE_PORT, ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE, + POSTGRESQL_DATA_PATH, ) from utils import new_password @@ -164,17 +164,16 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: ) return - # Try to wait for pg_hba trigger - try: 
- for attempt in Retrying(stop=stop_after_attempt(15), wait=wait_fixed(1)): - with attempt: - if not self.charm.postgresql.is_user_in_hba(user): - raise Exception("pg_hba not ready") + # Try to update pg_hba + if not self.charm.postgresql.is_user_in_hba(user): + with open(f"{POSTGRESQL_DATA_PATH}/pg_hba.conf", "a") as file: + file.write( + f"{'hostssl' if self.charm.is_tls_enabled else 'host'} {database} {user} 0.0.0.0/0 md5" + ) + self.charm.postgresql.reload_config() self.charm.unit_peer_data.update({ "pg_hba_needs_update_timestamp": str(datetime.now()) }) - except RetryError: - logger.warning("database requested: Unable to check pg_hba rule update") def _on_relation_departed(self, event: RelationDepartedEvent) -> None: """Set a flag to avoid deleting database users when not wanted.""" From a2baa62e68827c116b46c63e38ff5d2346d22a66 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 15:43:35 +0300 Subject: [PATCH 11/37] Sync hba changes before creating db resources --- lib/charms/postgresql_k8s/v0/postgresql.py | 12 ------ src/charm.py | 7 ++++ src/relations/postgresql_provider.py | 47 ++++++++++++++++------ 3 files changed, 42 insertions(+), 24 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index ce75d3ba9d..54453a3623 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -1099,15 +1099,3 @@ def is_user_in_hba(self, username: str) -> bool: finally: if connection: connection.close() - - def reload_config(self) -> None: - """Reload postgresql's configuration on the local unit.""" - connection = None - try: - connection = self._connect_to_database(database_host=self.current_host) - connection.autocommit = True - with connection.cursor() as cursor: - cursor.execute("SELECT pg_reload_conf();") - finally: - if connection is not None: - connection.close() diff --git a/src/charm.py b/src/charm.py index aad831c271..27a1dcf0bf 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2051,6 +2051,13 @@ def _can_connect_to_postgresql(self) -> bool: def update_config(self, is_creating_backup: bool = False) -> bool: """Updates Patroni config file based on the existence of the TLS files.""" + if self.unit_peer_data.get("hba_hash") != self.app_peer_data.get("hba_hash"): + logger.info("Updating hba definitions") + self.postgresql_client_relation.append_to_pg_hba() + hba_hash = self.postgresql_client_relation.generate_hba_hash() + self.unit_peer_data.update({"hba_hash": hba_hash}) + if self.unit.is_leader(): + self.app_peer_data.update({"hba_hash": hba_hash}) # Retrieve PostgreSQL parameters. 
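For context on the rule text appended to pg_hba.conf in the changes above: a pg_hba entry is a whitespace-separated record of connection type, database, user, client address and authentication method, and it only takes effect once PostgreSQL re-reads the file. A minimal sketch of building such a line; the helper name and sample values are illustrative, not part of the charm:

def format_hba_rule(database: str, user: str, tls: bool) -> str:
    # pg_hba.conf fields: connection type, database, user, client address, auth method.
    connection_type = "hostssl" if tls else "host"
    return f"{connection_type} {database} {user} 0.0.0.0/0 md5"


print(format_hba_rule("mydb", "relation_id_42", tls=False))
# host mydb relation_id_42 0.0.0.0/0 md5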
if self.config.profile_limit_memory: limit_memory = self.config.profile_limit_memory * 10**6 diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 10b40782fe..464d493842 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -4,7 +4,7 @@ """Postgres client relation hooks & helpers.""" import logging -from datetime import datetime +from hashlib import shake_128 from charms.data_platform_libs.v0.data_interfaces import ( DatabaseProvides, @@ -69,6 +69,27 @@ def __init__(self, charm: CharmBase, relation_name: str = "database") -> None: self.database_provides.on.database_requested, self._on_database_requested ) + def generate_hba_hash(self) -> str: + """Generate expected user and database hash.""" + user_db_pairs = {} + for relation in self.model.relations[self.relation_name]: + if database := self.database_provides.fetch_relation_field(relation.id, "database"): + user = f"relation_id_{relation.id}" + user_db_pairs[user] = database + return shake_128(str(user_db_pairs).encode()).hexdigest(16) + + def append_to_pg_hba(self) -> None: + """Append missing users to pg hba.""" + with open(f"{POSTGRESQL_DATA_PATH}/pg_hba.conf", "a") as file: + for relation in self.model.relations[self.relation_name]: + user = f"relation_id_{relation.id}" + if database := self.database_provides.fetch_relation_field( + relation.id, "database" + ) and not self.charm.postgresql.is_user_in_hba(user): + file.write( + f"{'hostssl' if self.charm.is_tls_enabled else 'host'} {database} {user} 0.0.0.0/0 md5" + ) + @staticmethod def _sanitize_extra_roles(extra_roles: str | None) -> list[str]: """Standardize and sanitize user extra-roles.""" @@ -93,6 +114,19 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return + hba_hash = self.generate_hba_hash() + self.charm.app_peer_data.update({"hba_hash": hba_hash}) + for key in self.charm._peers.data: + # We skip the leader so we don't have to wait on the defer + if ( + key != self.charm.app + and key != self.charm.unit + and self.charm._peers.data[key].get("hba_hash", "") != hba_hash + ): + logger.debug("Not all units have synced configuration") + event.defer() + return + # Retrieve the database name and extra user roles using the charm library. 
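As a reference for the fingerprinting used by generate_hba_hash above: shake_128 comes from the standard library hashlib and, being a variable-length digest, takes the output size explicitly. A self-contained sketch with made-up relation data:

from hashlib import shake_128

# Hypothetical relation ids and the databases they requested.
user_db_pairs = {"relation_id_7": "mydb", "relation_id_9": "reports"}
fingerprint = shake_128(str(user_db_pairs).encode()).hexdigest(16)  # 16 bytes -> 32 hex chars
print(fingerprint)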
database = event.database @@ -164,17 +198,6 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: ) return - # Try to update pg_hba - if not self.charm.postgresql.is_user_in_hba(user): - with open(f"{POSTGRESQL_DATA_PATH}/pg_hba.conf", "a") as file: - file.write( - f"{'hostssl' if self.charm.is_tls_enabled else 'host'} {database} {user} 0.0.0.0/0 md5" - ) - self.charm.postgresql.reload_config() - self.charm.unit_peer_data.update({ - "pg_hba_needs_update_timestamp": str(datetime.now()) - }) - def _on_relation_departed(self, event: RelationDepartedEvent) -> None: """Set a flag to avoid deleting database users when not wanted.""" # Set a flag to avoid deleting database users when this unit From 2d3d97f5b50ff5d1deba14679df5dd7d3999f660 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 16:28:57 +0300 Subject: [PATCH 12/37] Force regenerate hash and config on leader --- src/relations/postgresql_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 464d493842..d207a7ec12 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -115,7 +115,7 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: return hba_hash = self.generate_hba_hash() - self.charm.app_peer_data.update({"hba_hash": hba_hash}) + self.charm.update_config() for key in self.charm._peers.data: # We skip the leader so we don't have to wait on the defer if ( From 71e392dd8be40e80a482d402b98f443af6f95aa8 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 16:34:45 +0300 Subject: [PATCH 13/37] Use current host to check hba --- lib/charms/postgresql_k8s/v0/postgresql.py | 9 ++++++--- src/relations/postgresql_provider.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 54453a3623..71cc966f51 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 53 +LIBPATCH = 54 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -1082,11 +1082,14 @@ def validate_group_map(self, group_map: Optional[str]) -> bool: return True - def is_user_in_hba(self, username: str) -> bool: + def is_user_in_hba(self, username: str, current_host=False) -> bool: """Check if user was added in pg_hba.""" + host = self.current_host if current_host else None connection = None try: - with self._connect_to_database() as connection, connection.cursor() as cursor: + with self._connect_to_database( + database_host=host + ) as connection, connection.cursor() as cursor: cursor.execute( SQL( "SELECT COUNT(*) FROM pg_hba_file_rules WHERE {} = ANY(user_name);" diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index d207a7ec12..421d81e8d9 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -85,7 +85,7 @@ def append_to_pg_hba(self) -> None: user = f"relation_id_{relation.id}" if database := self.database_provides.fetch_relation_field( relation.id, "database" - ) and not self.charm.postgresql.is_user_in_hba(user): + ) and not self.charm.postgresql.is_user_in_hba(user, current_host=True): file.write( f"{'hostssl' if self.charm.is_tls_enabled else 
'host'} {database} {user} 0.0.0.0/0 md5" ) From d3edfba614008a8602fceaec3e7166dd3433f993 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 16:39:39 +0300 Subject: [PATCH 14/37] Update libs --- .../data_platform_libs/v0/data_interfaces.py | 112 ++++++++++++++---- lib/charms/data_platform_libs/v0/upgrade.py | 4 +- .../grafana_k8s/v0/grafana_dashboard.py | 20 +++- lib/charms/loki_k8s/v1/loki_push_api.py | 4 +- .../prometheus_k8s/v0/prometheus_scrape.py | 27 ++++- .../tempo_coordinator_k8s/v0/charm_tracing.py | 13 +- .../tempo_coordinator_k8s/v0/tracing.py | 20 +++- .../v2/tls_certificates.py | 4 +- 8 files changed, 162 insertions(+), 42 deletions(-) diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index 14c7cadca4..7314689c38 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 45 +LIBPATCH = 47 PYDEPS = ["ops>=2.0.0"] @@ -989,11 +989,7 @@ def __init__( @property def relations(self) -> List[Relation]: """The list of Relation instances associated with this relation_name.""" - return [ - relation - for relation in self._model.relations[self.relation_name] - if self._is_relation_active(relation) - ] + return self._model.relations[self.relation_name] @property def secrets_enabled(self): @@ -1271,15 +1267,6 @@ def _legacy_apply_on_delete(self, fields: List[str]) -> None: # Internal helper methods - @staticmethod - def _is_relation_active(relation: Relation): - """Whether the relation is active based on contained data.""" - try: - _ = repr(relation.data) - return True - except (RuntimeError, ModelError): - return False - @staticmethod def _is_secret_field(field: str) -> bool: """Is the field in question a secret reference (URI) field or not?""" @@ -2582,7 +2569,7 @@ def __init__( ################################################################################ -# Cross-charm Relatoins Data Handling and Evenets +# Cross-charm Relations Data Handling and Events ################################################################################ # Generic events @@ -3281,7 +3268,7 @@ def __init__( # Kafka Events -class KafkaProvidesEvent(RelationEvent): +class KafkaProvidesEvent(RelationEventWithSecret): """Base class for Kafka events.""" @property @@ -3300,6 +3287,40 @@ def consumer_group_prefix(self) -> Optional[str]: return self.relation.data[self.relation.app].get("consumer-group-prefix") + @property + def mtls_cert(self) -> Optional[str]: + """Returns TLS cert of the client.""" + if not self.relation.app: + return None + + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + + secret_field = f"{PROV_SECRET_PREFIX}{SECRET_GROUPS.MTLS}" + if secret_uri := self.relation.data[self.app].get(secret_field): + secret = self.framework.model.get_secret(id=secret_uri) + content = secret.get_content(refresh=True) + if content: + return content.get("mtls-cert") + + +class KafkaClientMtlsCertUpdatedEvent(KafkaProvidesEvent): + """Event emitted when the mtls relation is updated.""" + + def __init__(self, handle, relation, old_mtls_cert: Optional[str] = None, app=None, unit=None): + super().__init__(handle, relation, app, unit) + + self.old_mtls_cert = old_mtls_cert + + def snapshot(self): + 
"""Return a snapshot of the event.""" + return super().snapshot() | {"old_mtls_cert": self.old_mtls_cert} + + def restore(self, snapshot): + """Restore the event from a snapshot.""" + super().restore(snapshot) + self.old_mtls_cert = snapshot["old_mtls_cert"] + class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent): """Event emitted when a new topic is requested for use on this relation.""" @@ -3312,6 +3333,7 @@ class KafkaProvidesEvents(CharmEvents): """ topic_requested = EventSource(TopicRequestedEvent) + mtls_cert_updated = EventSource(KafkaClientMtlsCertUpdatedEvent) class KafkaRequiresEvent(RelationEvent): @@ -3429,6 +3451,13 @@ def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: """Event emitted when the relation has changed.""" super()._on_relation_changed_event(event) + + new_data_keys = list(event.relation.data[event.app].keys()) + if any(newval for newval in new_data_keys if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, new_data_keys) + + getattr(self.on, "mtls_cert_updated").emit(event.relation, app=event.app, unit=event.unit) + # Leader only if not self.relation_data.local_unit.is_leader(): return @@ -3443,6 +3472,33 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: event.relation, app=event.app, unit=event.unit ) + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self.relation_data._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + old_mtls_cert = event.secret.get_content().get("mtls-cert") + # mtls-cert is the only secret that can be updated + logger.info("mtls-cert updated") + getattr(self.on, "mtls_cert_updated").emit( + relation, app=relation.app, unit=remote_unit, old_mtls_cert=old_mtls_cert + ) + class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): """Provider-side of the Kafka relation.""" @@ -3463,11 +3519,13 @@ def __init__( extra_user_roles: Optional[str] = None, consumer_group_prefix: Optional[str] = None, additional_secret_fields: Optional[List[str]] = [], + mtls_cert: Optional[str] = None, ): """Manager of Kafka client relations.""" super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) self.topic = topic self.consumer_group_prefix = consumer_group_prefix or "" + self.mtls_cert = mtls_cert @property def topic(self): @@ -3481,6 +3539,15 @@ def topic(self, value): raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") self._topic = value + def set_mtls_cert(self, relation_id: int, mtls_cert: str) -> None: + """Set the mtls cert in the application relation databag / secret. + + Args: + relation_id: the identifier for a particular relation. + mtls_cert: mtls cert. 
+ """ + self.update_relation_data(relation_id, {"mtls-cert": mtls_cert}) + class KafkaRequirerEventHandlers(RequirerEventHandlers): """Requires-side of the Kafka relation.""" @@ -3502,6 +3569,9 @@ def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: # Sets topic, extra user roles, and "consumer-group-prefix" in the relation relation_data = {"topic": self.relation_data.topic} + if self.relation_data.mtls_cert: + relation_data["mtls-cert"] = self.relation_data.mtls_cert + if self.relation_data.extra_user_roles: relation_data["extra-user-roles"] = self.relation_data.extra_user_roles @@ -3560,15 +3630,17 @@ def __init__( extra_user_roles: Optional[str] = None, consumer_group_prefix: Optional[str] = None, additional_secret_fields: Optional[List[str]] = [], + mtls_cert: Optional[str] = None, ) -> None: KafkaRequirerData.__init__( self, charm.model, relation_name, topic, - extra_user_roles, - consumer_group_prefix, - additional_secret_fields, + extra_user_roles=extra_user_roles, + consumer_group_prefix=consumer_group_prefix, + additional_secret_fields=additional_secret_fields, + mtls_cert=mtls_cert, ) KafkaRequirerEventHandlers.__init__(self, charm, self) diff --git a/lib/charms/data_platform_libs/v0/upgrade.py b/lib/charms/data_platform_libs/v0/upgrade.py index 4d909d644d..5d051e9b5c 100644 --- a/lib/charms/data_platform_libs/v0/upgrade.py +++ b/lib/charms/data_platform_libs/v0/upgrade.py @@ -285,7 +285,7 @@ def restart(self, event) -> None: # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 18 +LIBPATCH = 19 PYDEPS = ["pydantic>=1.10,<2", "poetry-core"] @@ -929,7 +929,7 @@ def _on_upgrade_charm(self, event: UpgradeCharmEvent) -> None: # for k8s run version checks only on highest ordinal unit if ( self.charm.unit.name - == f"{self.charm.app.name}/{self.charm.app.planned_units() -1}" + == f"{self.charm.app.name}/{self.charm.app.planned_units() - 1}" ): try: self._upgrade_supported_check() diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py index c11f292b89..2b5bcff9e8 100644 --- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py +++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py @@ -219,7 +219,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 43 +LIBPATCH = 44 PYDEPS = ["cosl >= 0.0.50"] @@ -1676,14 +1676,22 @@ def _set_default_data(self) -> None: def set_peer_data(self, key: str, data: Any) -> None: """Put information into the peer data bucket instead of `StoredState`.""" - self._charm.peers.data[self._charm.app][key] = json.dumps(data) # type: ignore[attr-defined] + peers = self._charm.peers # type: ignore[attr-defined] + if not peers or not peers.data: + logger.info("set_peer_data: no peer relation. Is the charm being installed/removed?") + return + peers.data[self._charm.app][key] = json.dumps(data) # type: ignore[attr-defined] def get_peer_data(self, key: str) -> Any: """Retrieve information from the peer data bucket instead of `StoredState`.""" - if rel := self._charm.peers: # type: ignore[attr-defined] - data = rel.data[self._charm.app].get(key, "") - return json.loads(data) if data else {} - return {} + peers = self._charm.peers # type: ignore[attr-defined] + if not peers or not peers.data: + logger.warning( + "get_peer_data: no peer relation. Is the charm being installed/removed?" 
+ ) + return {} + data = peers.data[self._charm.app].get(key, "") + return json.loads(data) if data else {} class GrafanaDashboardAggregator(Object): diff --git a/lib/charms/loki_k8s/v1/loki_push_api.py b/lib/charms/loki_k8s/v1/loki_push_api.py index 57e7c90522..342f782c9d 100644 --- a/lib/charms/loki_k8s/v1/loki_push_api.py +++ b/lib/charms/loki_k8s/v1/loki_push_api.py @@ -546,7 +546,7 @@ def __init__(self, ...): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 16 +LIBPATCH = 17 PYDEPS = ["cosl"] @@ -1354,7 +1354,7 @@ def _url(self) -> str: Return url to loki, including port number, but without the endpoint subpath. """ - return "http://{}:{}".format(socket.getfqdn(), self.port) + return f"{self.scheme}://{socket.getfqdn()}:{self.port}" def _endpoint(self, url) -> dict: """Get Loki push API endpoint for a given url. diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index 1156b172af..5d7aafe6ff 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -362,7 +362,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 52 +LIBPATCH = 53 # Version 0.0.53 needed for cosl.rules.generic_alert_groups PYDEPS = ["cosl>=0.0.53"] @@ -1265,6 +1265,15 @@ def _dedupe_job_names(jobs: List[dict]): return deduped_jobs +def _dedupe_list(items: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """Deduplicate items in the list via object identity.""" + unique_items = [] + for item in items: + if item not in unique_items: + unique_items.append(item) + return unique_items + + def _resolve_dir_against_charm_path(charm: CharmBase, *path_elements: str) -> str: """Resolve the provided path items against the directory of the main file. @@ -1538,7 +1547,7 @@ def set_scrape_job_spec(self, _=None): if self._forward_alert_rules: alert_rules.add_path(self._alert_rules_path, recursive=True) alert_rules.add( - generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier + copy.deepcopy(generic_alert_groups.application_rules), group_name_prefix=self.topology.identifier ) alert_rules_as_dict = alert_rules.as_dict() @@ -1889,6 +1898,9 @@ def _set_prometheus_data(self, event: Optional[RelationJoinedEvent] = None): ) groups.extend(alert_rules.as_dict()["groups"]) + groups = _dedupe_list(groups) + jobs = _dedupe_list(jobs) + # Set scrape jobs and alert rules in relation data relations = [event.relation] if event else self.model.relations[self._prometheus_relation] for rel in relations: @@ -2141,10 +2153,12 @@ def _on_alert_rules_changed(self, event): self.set_alert_rule_data(app_name, unit_rules) def set_alert_rule_data(self, name: str, unit_rules: dict, label_rules: bool = True) -> None: - """Update alert rule data. + """Consolidate incoming alert rules (from stored-state or event) with those from relation data. - The unit rules should be a dict, which is has additional Juju topology labels added. For + The unit rules should be a dict, which have additional Juju topology labels added. For rules generated by the NRPE exporter, they are pre-labeled so lookups can be performed. + The unit rules are combined with the alert rules from relation data before being written + back to relation data and stored-state. 
""" if not self._charm.unit.is_leader(): return @@ -2166,6 +2180,9 @@ def set_alert_rule_data(self, name: str, unit_rules: dict, label_rules: bool = T if updated_group["name"] not in [g["name"] for g in groups]: groups.append(updated_group) + + groups = _dedupe_list(groups) + relation.data[self._charm.app]["alert_rules"] = json.dumps( {"groups": groups if self._forward_alert_rules else []} ) @@ -2216,6 +2233,8 @@ def remove_alert_rules(self, group_name: str, unit_name: str) -> None: changed_group["rules"] = rules_kept # type: ignore groups.append(changed_group) + groups = _dedupe_list(groups) + relation.data[self._charm.app]["alert_rules"] = json.dumps( {"groups": groups if self._forward_alert_rules else []} ) diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index e2208f756f..050e5b384b 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -314,7 +314,7 @@ def _remove_stale_otel_sdk_packages(): import opentelemetry import ops from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( - encode_spans, + encode_spans # type: ignore ) from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource @@ -348,7 +348,7 @@ def _remove_stale_otel_sdk_packages(): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] @@ -704,7 +704,14 @@ def _get_server_cert( f"{charm_type}.{server_cert_attr} is None; sending traces over INSECURE connection." ) return - elif not Path(server_cert).is_absolute(): + if not isinstance(server_cert, (str, Path)): + logger.warning( + f"{charm_type}.{server_cert_attr} has unexpected type {type(server_cert)}; " + f"sending traces over INSECURE connection." + ) + return + path = Path(server_cert) + if not path.is_absolute() or not path.exists(): raise ValueError( f"{charm_type}.{server_cert_attr} should resolve to a valid tls cert absolute path (string | Path)); " f"got {server_cert} instead." diff --git a/lib/charms/tempo_coordinator_k8s/v0/tracing.py b/lib/charms/tempo_coordinator_k8s/v0/tracing.py index 0128db352a..1059603b1c 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/tracing.py @@ -110,7 +110,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 PYDEPS = ["pydantic"] @@ -169,6 +169,10 @@ class DataValidationError(TracingError): """Raised when data validation fails on IPU relation data.""" +class DataAccessPermissionError(TracingError): + """Raised when follower units attempt leader-only operations.""" + + class AmbiguousRelationUsageError(TracingError): """Raised when one wrongly assumes that there can only be one relation on an endpoint.""" @@ -779,7 +783,7 @@ def __init__( self.framework.observe(events.relation_changed, self._on_tracing_relation_changed) self.framework.observe(events.relation_broken, self._on_tracing_relation_broken) - if protocols: + if protocols and self._charm.unit.is_leader(): # we can't be sure that the current event context supports read/writing relation data for this relation, # so we catch ModelErrors. This is because we're doing this in init. 
try: @@ -809,6 +813,8 @@ def request_protocols( TracingRequirerAppData( receivers=list(protocols), ).dump(relation.data[self._charm.app]) + else: + raise DataAccessPermissionError("only leaders can request_protocols") @property def relations(self) -> List[Relation]: @@ -957,7 +963,15 @@ def charm_tracing_config( if not endpoint_requirer.is_ready(): return None, None - endpoint = endpoint_requirer.get_endpoint("otlp_http") + try: + endpoint = endpoint_requirer.get_endpoint("otlp_http") + except ModelError as e: + if e.args[0] == "ERROR permission denied\n": + # this can happen the app databag doesn't have data, + # or we're breaking the relation. + return None, None + raise + if not endpoint: return None, None diff --git a/lib/charms/tls_certificates_interface/v2/tls_certificates.py b/lib/charms/tls_certificates_interface/v2/tls_certificates.py index c232362feb..8023d85ddd 100644 --- a/lib/charms/tls_certificates_interface/v2/tls_certificates.py +++ b/lib/charms/tls_certificates_interface/v2/tls_certificates.py @@ -282,10 +282,10 @@ def _on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEven from typing import Any, Dict, List, Literal, Optional, Union from cryptography import x509 -from cryptography.hazmat._oid import ExtensionOID from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives.serialization import pkcs12 +from cryptography.x509.oid import ExtensionOID from jsonschema import exceptions, validate from ops.charm import ( CharmBase, @@ -307,7 +307,7 @@ def _on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEven # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 29 +LIBPATCH = 30 PYDEPS = ["cryptography", "jsonschema"] From 59788791b0517e990c2ded7b5895b0fdfbfd69ff Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 17:18:03 +0300 Subject: [PATCH 15/37] Compare to local hash --- src/charm.py | 6 ++---- tests/unit/test_charm.py | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/charm.py b/src/charm.py index 27a1dcf0bf..5bd2e6f0cf 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2051,13 +2051,11 @@ def _can_connect_to_postgresql(self) -> bool: def update_config(self, is_creating_backup: bool = False) -> bool: """Updates Patroni config file based on the existence of the TLS files.""" - if self.unit_peer_data.get("hba_hash") != self.app_peer_data.get("hba_hash"): + hba_hash = self.postgresql_client_relation.generate_hba_hash() + if self.unit_peer_data.get("hba_hash") != hba_hash: logger.info("Updating hba definitions") self.postgresql_client_relation.append_to_pg_hba() - hba_hash = self.postgresql_client_relation.generate_hba_hash() self.unit_peer_data.update({"hba_hash": hba_hash}) - if self.unit.is_leader(): - self.app_peer_data.update({"hba_hash": hba_hash}) # Retrieve PostgreSQL parameters. 
if self.config.profile_limit_memory: limit_memory = self.config.profile_limit_memory * 10**6 diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index a624dab832..50b13b1fd7 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1696,6 +1696,7 @@ def test_update_config(harness): "charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock ) as _is_tls_enabled, patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch("charm.PostgreSQLProvider.append_to_pg_hba"), ): rel_id = harness.model.get_relation(PEER).id # Mock some properties. From e04b552e324cdbcaa8236d92e78d4df8535f622e Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 17:21:28 +0300 Subject: [PATCH 16/37] Cla check for 16/edge --- .github/workflows/cla-check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cla-check.yml b/.github/workflows/cla-check.yml index 2567517472..32848a0a5f 100644 --- a/.github/workflows/cla-check.yml +++ b/.github/workflows/cla-check.yml @@ -2,7 +2,7 @@ name: CLA check on: pull_request: - branches: [main] + branches: [main, 16/edge] jobs: cla-check: From 41da71587c5ebb3c3a48b90aff82458fd54b90f5 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 18:06:07 +0300 Subject: [PATCH 17/37] Don't defer peer change before init --- src/charm.py | 4 ++-- tests/unit/test_charm.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/charm.py b/src/charm.py index 5bd2e6f0cf..158ebd5c58 100755 --- a/src/charm.py +++ b/src/charm.py @@ -580,14 +580,14 @@ def _on_peer_relation_changed(self, event: HookEvent) -> None: # noqa: C901 if self.unit.is_leader(): if self._initialize_cluster(event): logger.debug("Deferring on_peer_relation_changed: Leader initialized cluster") + event.defer() else: logger.debug("_initialized_cluster failed on _peer_relation_changed") return else: logger.debug( - "Deferring on_peer_relation_changed: Cluster must be initialized before members can join" + "Early exit on_peer_relation_changed: Cluster must be initialized before members can join" ) - event.defer() return # If the leader is the one receiving the event, it adds the new members, diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 50b13b1fd7..3119c3ac56 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1425,7 +1425,7 @@ def test_on_peer_relation_changed(harness): harness.set_can_connect(POSTGRESQL_CONTAINER, True) relation = harness.model.get_relation(PEER, rel_id) harness.charm.on.database_peers_relation_changed.emit(relation) - _defer.assert_called_once() + assert not _defer.called _add_members.assert_not_called() _update_config.assert_not_called() _coordinate_stanza_fields.assert_not_called() From 12ba66242391827ebfa5b8b972f37f5d399a2a7d Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 18:39:09 +0300 Subject: [PATCH 18/37] Add back app check --- src/charm.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index 158ebd5c58..1a5fe2c915 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2052,10 +2052,16 @@ def _can_connect_to_postgresql(self) -> bool: def update_config(self, is_creating_backup: bool = False) -> bool: """Updates Patroni config file based on the existence of the TLS files.""" hba_hash = self.postgresql_client_relation.generate_hba_hash() - if self.unit_peer_data.get("hba_hash") != hba_hash: + if ( + self.unit_peer_data.get("hba_hash") != hba_hash + 
or self.app_peer_data.get("hba_hash") != hba_hash + ): logger.info("Updating hba definitions") self.postgresql_client_relation.append_to_pg_hba() self.unit_peer_data.update({"hba_hash": hba_hash}) + if self.unit.is_leader(): + self.app_peer_data.update({"hba_hash": hba_hash}) + # Retrieve PostgreSQL parameters. if self.config.profile_limit_memory: limit_memory = self.config.profile_limit_memory * 10**6 From a2641934198757ae1ff705333b13ddbe1c82ea70 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 20:04:57 +0300 Subject: [PATCH 19/37] Revert back to just updating peer data --- src/relations/postgresql_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 421d81e8d9..0f055a4819 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -115,7 +115,7 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: return hba_hash = self.generate_hba_hash() - self.charm.update_config() + self.charm.app_peer_data.update({"hba_hash": hba_hash}) for key in self.charm._peers.data: # We skip the leader so we don't have to wait on the defer if ( From d8c0a0055ad8f5b9d5c5ea6ee522bfa8dfe5ea17 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 23:07:51 +0300 Subject: [PATCH 20/37] Only sync hba once initially set --- src/charm.py | 18 ++++++++++++------ src/relations/postgresql_provider.py | 7 ++++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/charm.py b/src/charm.py index 1a5fe2c915..f572d24cd8 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2051,16 +2051,22 @@ def _can_connect_to_postgresql(self) -> bool: def update_config(self, is_creating_backup: bool = False) -> bool: """Updates Patroni config file based on the existence of the TLS files.""" - hba_hash = self.postgresql_client_relation.generate_hba_hash() - if ( - self.unit_peer_data.get("hba_hash") != hba_hash - or self.app_peer_data.get("hba_hash") != hba_hash + # Don't track hba changes until database created sets it + if "hba_hash" in self.app_peer_data and ( + self.unit_peer_data.get("hba_hash") + != self.postgresql_client_relation.generate_hba_hash + or self.app_peer_data.get("hba_hash") + != self.postgresql_client_relation.generate_hba_hash ): logger.info("Updating hba definitions") self.postgresql_client_relation.append_to_pg_hba() - self.unit_peer_data.update({"hba_hash": hba_hash}) + self.unit_peer_data.update({ + "hba_hash": self.postgresql_client_relation.generate_hba_hash + }) if self.unit.is_leader(): - self.app_peer_data.update({"hba_hash": hba_hash}) + self.app_peer_data.update({ + "hba_hash": self.postgresql_client_relation.generate_hba_hash + }) # Retrieve PostgreSQL parameters. 
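The guard introduced here makes pg_hba reconciliation a no-op until the leader has seeded an initial hash into the application peer databag, so units do not rewrite configuration before the first client request is processed. A condensed, standalone restatement of the condition, with plain dicts standing in for the peer databags:

def should_sync_hba(app_data: dict, unit_data: dict, current_hash: str) -> bool:
    if "hba_hash" not in app_data:
        # Nothing requested yet; wait for the leader to seed the value.
        return False
    return unit_data.get("hba_hash") != current_hash or app_data.get("hba_hash") != current_hash


assert should_sync_hba({}, {}, "abc") is False
assert should_sync_hba({"hba_hash": "old"}, {"hba_hash": "old"}, "new") is True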
if self.config.profile_limit_memory: diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 0f055a4819..8de3bba4ab 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -69,6 +69,7 @@ def __init__(self, charm: CharmBase, relation_name: str = "database") -> None: self.database_provides.on.database_requested, self._on_database_requested ) + @property def generate_hba_hash(self) -> str: """Generate expected user and database hash.""" user_db_pairs = {} @@ -114,14 +115,14 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return - hba_hash = self.generate_hba_hash() - self.charm.app_peer_data.update({"hba_hash": hba_hash}) + self.charm.app_peer_data.update({"hba_hash": self.generate_hba_hash}) + self.charm.update_config() for key in self.charm._peers.data: # We skip the leader so we don't have to wait on the defer if ( key != self.charm.app and key != self.charm.unit - and self.charm._peers.data[key].get("hba_hash", "") != hba_hash + and self.charm._peers.data[key].get("hba_hash", "") != self.generate_hba_hash ): logger.debug("Not all units have synced configuration") event.defer() From 2226c0a9fd79e39da33f5436713237fd5de68d65 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 25 Jun 2025 23:37:16 +0300 Subject: [PATCH 21/37] Bump timeout --- tests/integration/test_pg_hba.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index dbcac32f82..95cac54229 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -51,7 +51,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): await ops_test.model.add_relation(DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME) await ops_test.model.wait_for_idle( - apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active" + apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active", timeout=1000 ) primary = await get_primary(ops_test) From 85cdbe77476ba9c5c8a3c4ae5e1e160913845044 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 27 Jun 2025 00:51:58 +0300 Subject: [PATCH 22/37] Don't filter appends to pg_hba --- lib/charms/postgresql_k8s/v0/postgresql.py | 9 +++------ src/relations/postgresql_provider.py | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 71cc966f51..54453a3623 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 54 +LIBPATCH = 53 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -1082,14 +1082,11 @@ def validate_group_map(self, group_map: Optional[str]) -> bool: return True - def is_user_in_hba(self, username: str, current_host=False) -> bool: + def is_user_in_hba(self, username: str) -> bool: """Check if user was added in pg_hba.""" - host = self.current_host if current_host else None connection = None try: - with self._connect_to_database( - database_host=host - ) as connection, connection.cursor() as cursor: + with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute( SQL( "SELECT COUNT(*) FROM pg_hba_file_rules WHERE {} = ANY(user_name);" diff --git a/src/relations/postgresql_provider.py 
b/src/relations/postgresql_provider.py index 8de3bba4ab..47d2bd2b1c 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -86,7 +86,7 @@ def append_to_pg_hba(self) -> None: user = f"relation_id_{relation.id}" if database := self.database_provides.fetch_relation_field( relation.id, "database" - ) and not self.charm.postgresql.is_user_in_hba(user, current_host=True): + ): file.write( f"{'hostssl' if self.charm.is_tls_enabled else 'host'} {database} {user} 0.0.0.0/0 md5" ) From 49574a87f6eb60a664bd53af7406fe52bf7346f4 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 1 Jul 2025 15:06:13 +0300 Subject: [PATCH 23/37] Append the rel users directly to the user map --- src/charm.py | 42 +++++++++++++++++----------- src/relations/postgresql_provider.py | 24 +++------------- tests/unit/test_charm.py | 1 - 3 files changed, 29 insertions(+), 38 deletions(-) diff --git a/src/charm.py b/src/charm.py index f572d24cd8..119f6f84a1 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2051,23 +2051,6 @@ def _can_connect_to_postgresql(self) -> bool: def update_config(self, is_creating_backup: bool = False) -> bool: """Updates Patroni config file based on the existence of the TLS files.""" - # Don't track hba changes until database created sets it - if "hba_hash" in self.app_peer_data and ( - self.unit_peer_data.get("hba_hash") - != self.postgresql_client_relation.generate_hba_hash - or self.app_peer_data.get("hba_hash") - != self.postgresql_client_relation.generate_hba_hash - ): - logger.info("Updating hba definitions") - self.postgresql_client_relation.append_to_pg_hba() - self.unit_peer_data.update({ - "hba_hash": self.postgresql_client_relation.generate_hba_hash - }) - if self.unit.is_leader(): - self.app_peer_data.update({ - "hba_hash": self.postgresql_client_relation.generate_hba_hash - }) - # Retrieve PostgreSQL parameters. 
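The hunks that follow copy client-relation users straight into the user/database map instead of waiting for the corresponding PostgreSQL roles to exist. A standalone sketch of that mapping step, with plain (relation_id, database) tuples standing in for relation objects; all names are illustrative:

def relation_user_map(relations):
    mapping = {}
    for relation_id, database in relations:
        if database:  # skip relations that have not requested a database yet
            mapping[f"relation_id_{relation_id}"] = database
    return mapping


print(relation_user_map([(7, "mydb"), (9, None), (12, "reports")]))
# {'relation_id_7': 'mydb', 'relation_id_12': 'reports'}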
if self.config.profile_limit_memory: limit_memory = self.config.profile_limit_memory * 10**6 @@ -2136,6 +2119,21 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() + # Don't track hba changes until database created sets it + if "user_hash" in self.app_peer_data and ( + self.unit_peer_data.get("user_hash") + != self.postgresql_client_relation.generate_user_hash + or self.app_peer_data.get("user_hash") + != self.postgresql_client_relation.generate_user_hash + ): + logger.info("Updating user hash") + self.unit_peer_data.update({ + "user_hash": self.postgresql_client_relation.generate_user_hash + }) + if self.unit.is_leader(): + self.app_peer_data.update({ + "user_hash": self.postgresql_client_relation.generate_user_hash + }) return True def _validate_config_options(self) -> None: @@ -2333,6 +2331,16 @@ def relations_user_databases_map(self) -> dict: user, current_host=self.is_connectivity_enabled ) ) + + # Copy relations users directly instead of waiting for them to be created + for relation in self.model.relations[self.postgresql_client_relation.relation_name]: + user = f"relation_id_{relation.id}" + if user not in user_database_map and ( + database := self.postgresql_client_relation.database_provides.fetch_relation_field( + relation.id, "database" + ) + ): + user_database_map[user] = database return user_database_map def override_patroni_on_failure_condition( diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 47d2bd2b1c..44ee87b984 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -23,11 +23,7 @@ from ops.framework import Object from ops.model import ActiveStatus, BlockedStatus, Relation -from constants import ( - DATABASE_PORT, - ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE, - POSTGRESQL_DATA_PATH, -) +from constants import DATABASE_PORT, ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE from utils import new_password logger = logging.getLogger(__name__) @@ -70,7 +66,7 @@ def __init__(self, charm: CharmBase, relation_name: str = "database") -> None: ) @property - def generate_hba_hash(self) -> str: + def generate_user_hash(self) -> str: """Generate expected user and database hash.""" user_db_pairs = {} for relation in self.model.relations[self.relation_name]: @@ -79,18 +75,6 @@ def generate_hba_hash(self) -> str: user_db_pairs[user] = database return shake_128(str(user_db_pairs).encode()).hexdigest(16) - def append_to_pg_hba(self) -> None: - """Append missing users to pg hba.""" - with open(f"{POSTGRESQL_DATA_PATH}/pg_hba.conf", "a") as file: - for relation in self.model.relations[self.relation_name]: - user = f"relation_id_{relation.id}" - if database := self.database_provides.fetch_relation_field( - relation.id, "database" - ): - file.write( - f"{'hostssl' if self.charm.is_tls_enabled else 'host'} {database} {user} 0.0.0.0/0 md5" - ) - @staticmethod def _sanitize_extra_roles(extra_roles: str | None) -> list[str]: """Standardize and sanitize user extra-roles.""" @@ -115,14 +99,14 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return - self.charm.app_peer_data.update({"hba_hash": self.generate_hba_hash}) + self.charm.app_peer_data.update({"user_hash": self.generate_user_hash}) self.charm.update_config() for key in self.charm._peers.data: # We skip the leader so we don't have to wait on the defer if ( key != self.charm.app and key != self.charm.unit - and 
self.charm._peers.data[key].get("hba_hash", "") != self.generate_hba_hash + and self.charm._peers.data[key].get("user_hash", "") != self.generate_user_hash ): logger.debug("Not all units have synced configuration") event.defer() diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 3119c3ac56..84759e94b3 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1696,7 +1696,6 @@ def test_update_config(harness): "charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock ) as _is_tls_enabled, patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, - patch("charm.PostgreSQLProvider.append_to_pg_hba"), ): rel_id = harness.model.get_relation(PEER).id # Mock some properties. From f94f6f3da63f4d6b3602f8deeee6be3bfb91e25a Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 3 Jul 2025 14:10:49 +0300 Subject: [PATCH 24/37] Add idle timeout --- tests/integration/new_relations/test_new_relations_1.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 4a94fbcfbf..a49313e91f 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -247,7 +247,7 @@ async def test_an_application_can_connect_to_multiple_database_clusters(ops_test f"{APPLICATION_APP_NAME}:{MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", ANOTHER_DATABASE_APP_NAME, ) - await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", idle_period=30) # Retrieve the connection string to both database clusters using the relation aliases # and assert they are different. @@ -310,7 +310,9 @@ async def test_an_application_can_request_multiple_databases(ops_test: OpsTest): await ops_test.model.add_relation( f"{APPLICATION_APP_NAME}:{SECOND_DATABASE_RELATION_NAME}", DATABASE_APP_NAME ) - await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", timeout=15 * 60) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, status="active", timeout=15 * 60, idle_period=30 + ) # Get the connection strings to connect to both databases. for attempt in Retrying(stop=stop_after_attempt(15), wait=wait_fixed(3), reraise=True): From d9add3592a7dae9e97bf9b68b6717c910dc620c5 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 15 Jul 2025 16:53:18 +0300 Subject: [PATCH 25/37] Remove trigger --- lib/charms/postgresql_k8s/v0/postgresql.py | 79 +--------------------- 1 file changed, 1 insertion(+), 78 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 54453a3623..7ba643aadb 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 53 +LIBPATCH = 54 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -782,83 +782,6 @@ def set_up_database(self) -> None: connection = None cursor = None try: - with self._connect_to_database( - database="template1" - ) as connection, connection.cursor() as cursor: - # Create database function and event trigger to identify users created by PgBouncer. 
- cursor.execute( - "SELECT TRUE FROM pg_event_trigger WHERE evtname = 'update_pg_hba_on_create_schema';" - ) - if cursor.fetchone() is None: - cursor.execute(""" -CREATE OR REPLACE FUNCTION update_pg_hba() - RETURNS event_trigger - LANGUAGE plpgsql - AS $$ - DECLARE - hba_file TEXT; - copy_command TEXT; - connection_type TEXT; - rec record; - insert_value TEXT; - changes INTEGER = 0; - BEGIN - -- Don't execute on replicas. - IF NOT pg_is_in_recovery() THEN - -- Load the current authorisation rules. - DROP TABLE IF EXISTS pg_hba; - CREATE TEMPORARY TABLE pg_hba (lines TEXT); - SELECT setting INTO hba_file FROM pg_settings WHERE name = 'hba_file'; - IF hba_file IS NOT NULL THEN - copy_command='COPY pg_hba FROM ''' || hba_file || '''' ; - EXECUTE copy_command; - -- Build a list of the relation users and the databases they can access. - DROP TABLE IF EXISTS relation_users; - CREATE TEMPORARY TABLE relation_users AS - SELECT t.user, STRING_AGG(DISTINCT t.database, ',') AS databases FROM( SELECT u.usename AS user, CASE WHEN u.usesuper THEN 'all' ELSE d.datname END AS database FROM ( SELECT usename, usesuper FROM pg_catalog.pg_user WHERE usename NOT IN ('backup', 'monitoring', 'operator', 'postgres', 'replication', 'rewind')) AS u JOIN ( SELECT datname FROM pg_catalog.pg_database WHERE NOT datistemplate ) AS d ON has_database_privilege(u.usename, d.datname, 'CONNECT') ) AS t GROUP BY 1; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines LIKE 'hostssl %') > 0 THEN - connection_type := 'hostssl'; - ELSE - connection_type := 'host'; - END IF; - -- Add the new users to the pg_hba file. - FOR rec IN SELECT * FROM relation_users - LOOP - insert_value := connection_type || ' ' || rec.databases || ' ' || rec.user || ' 0.0.0.0/0 md5'; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines = insert_value) = 0 THEN - INSERT INTO pg_hba (lines) VALUES (insert_value); - changes := changes + 1; - END IF; - END LOOP; - -- Remove users that don't exist anymore from the pg_hba file. - FOR rec IN SELECT h.lines FROM pg_hba AS h LEFT JOIN relation_users AS r ON SPLIT_PART(h.lines, ' ', 3) = r.user WHERE r.user IS NULL AND (SPLIT_PART(h.lines, ' ', 3) LIKE 'relation_id_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE 'pgbouncer_auth_relation_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE '%_user_%_%') - LOOP - DELETE FROM pg_hba WHERE lines = rec.lines; - changes := changes + 1; - END LOOP; - -- Apply the changes to the pg_hba file. 
- IF changes > 0 THEN - copy_command='COPY pg_hba TO ''' || hba_file || '''' ; - EXECUTE copy_command; - PERFORM pg_reload_conf(); - END IF; - END IF; - END IF; - END; - $$; - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_create_schema - ON ddl_command_end - WHEN TAG IN ('CREATE SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_drop_schema - ON ddl_command_end - WHEN TAG IN ('DROP SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") if cursor.fetchone() is None: From 4f23dab36df19b1d40511ff2950add586265c293 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 15 Jul 2025 19:15:38 +0300 Subject: [PATCH 26/37] Sleep longer --- .../new_relations/test_new_relations_1.py | 13 ++++++------- .../new_relations/test_relations_coherence.py | 4 ++-- tests/integration/test_pg_hba.py | 5 ++++- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index a49313e91f..2313d05805 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -14,11 +14,7 @@ from constants import DATABASE_DEFAULT_NAME -from ..helpers import ( - CHARM_BASE, - check_database_users_existence, - scale_application, -) +from ..helpers import CHARM_BASE, check_database_users_existence, scale_application from .helpers import ( build_connection_string, get_application_relation_data, @@ -454,7 +450,9 @@ async def test_admin_role(ops_test: OpsTest): all_app_names = [DATA_INTEGRATOR_APP_NAME] all_app_names.extend(APP_NAMES) async with ops_test.fast_forward(): - await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, base=CHARM_BASE) + await ops_test.model.deploy( + DATA_INTEGRATOR_APP_NAME, channel="latest/edge", series="jammy" + ) await ops_test.model.wait_for_idle(apps=[DATA_INTEGRATOR_APP_NAME], status="blocked") await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ "database-name": DATA_INTEGRATOR_APP_NAME.replace("-", "_"), @@ -546,7 +544,8 @@ async def test_invalid_extra_user_roles(ops_test: OpsTest): await ops_test.model.deploy( DATA_INTEGRATOR_APP_NAME, application_name=another_data_integrator_app_name, - base=CHARM_BASE, + channel="latest/edge", + series="jammy", ) await ops_test.model.wait_for_idle( apps=[another_data_integrator_app_name], status="blocked" diff --git a/tests/integration/new_relations/test_relations_coherence.py b/tests/integration/new_relations/test_relations_coherence.py index 41b94afd41..7e444d9b2e 100644 --- a/tests/integration/new_relations/test_relations_coherence.py +++ b/tests/integration/new_relations/test_relations_coherence.py @@ -11,7 +11,7 @@ from constants import DATABASE_DEFAULT_NAME -from ..helpers import CHARM_BASE, DATABASE_APP_NAME, build_and_deploy +from ..helpers import DATABASE_APP_NAME, build_and_deploy from .helpers import build_connection_string from .test_new_relations_1 import DATA_INTEGRATOR_APP_NAME @@ -31,7 +31,7 @@ async def test_relations(ops_test: OpsTest, charm): await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=3000) # Creating first time relation with user role - await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, base=CHARM_BASE) + await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, series="jammy") 
await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ "database-name": DATA_INTEGRATOR_APP_NAME.replace("-", "_"), }) diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index 95cac54229..9242f7c9f2 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -26,6 +26,9 @@ SECOND_RELATION_USER = "relation_id_1" PASSWORD = "test-password" +# Topology observer period * 1.5 +SLEEP_TIME = 45 + @pytest.mark.abort_on_fail async def test_pg_hba(ops_test: OpsTest, charm): @@ -99,7 +102,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): if connection: connection.close() - sleep(30) + sleep(SLEEP_TIME) for unit in ops_test.model.applications[DATABASE_APP_NAME].units: try: From af03cd05690fbdb8e6f5ee321f7f2f16203cf4b7 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 15 Jul 2025 21:15:22 +0300 Subject: [PATCH 27/37] Set extra user roles --- tests/integration/new_relations/test_new_relations_1.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 2313d05805..a7565fa7d1 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -50,7 +50,8 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest, charm): application_name=APPLICATION_APP_NAME, num_units=2, base=CHARM_BASE, - channel="edge", + channel="latest/edge", + config={"extra_user_roles": "CREATEDB,CREATEROLE"}, ), ops_test.model.deploy( charm, From 62834af6f6576b6636822bdf96978aa5ca6450e4 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 16 Jul 2025 03:03:45 +0300 Subject: [PATCH 28/37] Always update hash --- src/charm.py | 18 +++++------------- src/relations/postgresql_provider.py | 1 - 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/charm.py b/src/charm.py index 119f6f84a1..ad6a653b9e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2119,21 +2119,13 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() - # Don't track hba changes until database created sets it - if "user_hash" in self.app_peer_data and ( - self.unit_peer_data.get("user_hash") - != self.postgresql_client_relation.generate_user_hash - or self.app_peer_data.get("user_hash") - != self.postgresql_client_relation.generate_user_hash - ): - logger.info("Updating user hash") - self.unit_peer_data.update({ + self.unit_peer_data.update({ + "user_hash": self.postgresql_client_relation.generate_user_hash + }) + if self.unit.is_leader(): + self.app_peer_data.update({ "user_hash": self.postgresql_client_relation.generate_user_hash }) - if self.unit.is_leader(): - self.app_peer_data.update({ - "user_hash": self.postgresql_client_relation.generate_user_hash - }) return True def _validate_config_options(self) -> None: diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 44ee87b984..8a824c959d 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -99,7 +99,6 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return - self.charm.app_peer_data.update({"user_hash": self.generate_user_hash}) self.charm.update_config() for key in self.charm._peers.data: # We skip the leader so we don't have to wait on the defer From 80ee8f88ab945f9ba4192c115859a108e3e2d6f7 Mon Sep 
17 00:00:00 2001 From: Dragomir Penev Date: Wed, 16 Jul 2025 15:03:59 +0300 Subject: [PATCH 29/37] Bump sleep period --- tests/integration/test_pg_hba.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index 9242f7c9f2..f50ff05bfe 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -26,8 +26,8 @@ SECOND_RELATION_USER = "relation_id_1" PASSWORD = "test-password" -# Topology observer period * 1.5 -SLEEP_TIME = 45 +# Topology observer period * 2 +SLEEP_TIME = 60 @pytest.mark.abort_on_fail From 78aef57ec9944525bc05bb94e05eb7f0cf80bd50 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 16 Jul 2025 16:34:13 +0300 Subject: [PATCH 30/37] Revert the trigger --- lib/charms/postgresql_k8s/v0/postgresql.py | 79 +++++++++++++++++++++- tests/integration/test_pg_hba.py | 5 +- 2 files changed, 79 insertions(+), 5 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 7ba643aadb..54453a3623 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 54 +LIBPATCH = 53 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -782,6 +782,83 @@ def set_up_database(self) -> None: connection = None cursor = None try: + with self._connect_to_database( + database="template1" + ) as connection, connection.cursor() as cursor: + # Create database function and event trigger to identify users created by PgBouncer. + cursor.execute( + "SELECT TRUE FROM pg_event_trigger WHERE evtname = 'update_pg_hba_on_create_schema';" + ) + if cursor.fetchone() is None: + cursor.execute(""" +CREATE OR REPLACE FUNCTION update_pg_hba() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + DECLARE + hba_file TEXT; + copy_command TEXT; + connection_type TEXT; + rec record; + insert_value TEXT; + changes INTEGER = 0; + BEGIN + -- Don't execute on replicas. + IF NOT pg_is_in_recovery() THEN + -- Load the current authorisation rules. + DROP TABLE IF EXISTS pg_hba; + CREATE TEMPORARY TABLE pg_hba (lines TEXT); + SELECT setting INTO hba_file FROM pg_settings WHERE name = 'hba_file'; + IF hba_file IS NOT NULL THEN + copy_command='COPY pg_hba FROM ''' || hba_file || '''' ; + EXECUTE copy_command; + -- Build a list of the relation users and the databases they can access. + DROP TABLE IF EXISTS relation_users; + CREATE TEMPORARY TABLE relation_users AS + SELECT t.user, STRING_AGG(DISTINCT t.database, ',') AS databases FROM( SELECT u.usename AS user, CASE WHEN u.usesuper THEN 'all' ELSE d.datname END AS database FROM ( SELECT usename, usesuper FROM pg_catalog.pg_user WHERE usename NOT IN ('backup', 'monitoring', 'operator', 'postgres', 'replication', 'rewind')) AS u JOIN ( SELECT datname FROM pg_catalog.pg_database WHERE NOT datistemplate ) AS d ON has_database_privilege(u.usename, d.datname, 'CONNECT') ) AS t GROUP BY 1; + IF (SELECT COUNT(lines) FROM pg_hba WHERE lines LIKE 'hostssl %') > 0 THEN + connection_type := 'hostssl'; + ELSE + connection_type := 'host'; + END IF; + -- Add the new users to the pg_hba file. 
+ FOR rec IN SELECT * FROM relation_users + LOOP + insert_value := connection_type || ' ' || rec.databases || ' ' || rec.user || ' 0.0.0.0/0 md5'; + IF (SELECT COUNT(lines) FROM pg_hba WHERE lines = insert_value) = 0 THEN + INSERT INTO pg_hba (lines) VALUES (insert_value); + changes := changes + 1; + END IF; + END LOOP; + -- Remove users that don't exist anymore from the pg_hba file. + FOR rec IN SELECT h.lines FROM pg_hba AS h LEFT JOIN relation_users AS r ON SPLIT_PART(h.lines, ' ', 3) = r.user WHERE r.user IS NULL AND (SPLIT_PART(h.lines, ' ', 3) LIKE 'relation_id_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE 'pgbouncer_auth_relation_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE '%_user_%_%') + LOOP + DELETE FROM pg_hba WHERE lines = rec.lines; + changes := changes + 1; + END LOOP; + -- Apply the changes to the pg_hba file. + IF changes > 0 THEN + copy_command='COPY pg_hba TO ''' || hba_file || '''' ; + EXECUTE copy_command; + PERFORM pg_reload_conf(); + END IF; + END IF; + END IF; + END; + $$; + """) + cursor.execute(""" +CREATE EVENT TRIGGER update_pg_hba_on_create_schema + ON ddl_command_end + WHEN TAG IN ('CREATE SCHEMA') + EXECUTE FUNCTION update_pg_hba(); + """) + cursor.execute(""" +CREATE EVENT TRIGGER update_pg_hba_on_drop_schema + ON ddl_command_end + WHEN TAG IN ('DROP SCHEMA') + EXECUTE FUNCTION update_pg_hba(); + """) with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") if cursor.fetchone() is None: diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index f50ff05bfe..95cac54229 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -26,9 +26,6 @@ SECOND_RELATION_USER = "relation_id_1" PASSWORD = "test-password" -# Topology observer period * 2 -SLEEP_TIME = 60 - @pytest.mark.abort_on_fail async def test_pg_hba(ops_test: OpsTest, charm): @@ -102,7 +99,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): if connection: connection.close() - sleep(SLEEP_TIME) + sleep(30) for unit in ops_test.model.applications[DATABASE_APP_NAME].units: try: From 3a39640760fd919f235774d50bfe4e3a7e8fd014 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 17 Jul 2025 03:23:59 +0300 Subject: [PATCH 31/37] Move generate_user_hash to charm --- src/charm.py | 14 ++++++++------ src/relations/postgresql_provider.py | 14 ++------------ 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/src/charm.py b/src/charm.py index ad6a653b9e..34bae7f855 100755 --- a/src/charm.py +++ b/src/charm.py @@ -13,6 +13,7 @@ import shutil import sys import time +from hashlib import shake_128 from pathlib import Path from typing import Literal, get_args from urllib.parse import urlparse @@ -2119,13 +2120,9 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() - self.unit_peer_data.update({ - "user_hash": self.postgresql_client_relation.generate_user_hash - }) + self.unit_peer_data.update({"user_hash": self.generate_user_hash}) if self.unit.is_leader(): - self.app_peer_data.update({ - "user_hash": self.postgresql_client_relation.generate_user_hash - }) + self.app_peer_data.update({"user_hash": self.generate_user_hash}) return True def _validate_config_options(self) -> None: @@ -2335,6 +2332,11 @@ def relations_user_databases_map(self) -> dict: user_database_map[user] = database return user_database_map + @property + def generate_user_hash(self) -> str: + """Generate expected user and 
database hash.""" + return shake_128(str(self.relations_user_databases_map).encode()).hexdigest(16) + def override_patroni_on_failure_condition( self, new_condition: str, repeat_cause: str | None ) -> bool: diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 8a824c959d..84ba570b3f 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -4,7 +4,6 @@ """Postgres client relation hooks & helpers.""" import logging -from hashlib import shake_128 from charms.data_platform_libs.v0.data_interfaces import ( DatabaseProvides, @@ -65,16 +64,6 @@ def __init__(self, charm: CharmBase, relation_name: str = "database") -> None: self.database_provides.on.database_requested, self._on_database_requested ) - @property - def generate_user_hash(self) -> str: - """Generate expected user and database hash.""" - user_db_pairs = {} - for relation in self.model.relations[self.relation_name]: - if database := self.database_provides.fetch_relation_field(relation.id, "database"): - user = f"relation_id_{relation.id}" - user_db_pairs[user] = database - return shake_128(str(user_db_pairs).encode()).hexdigest(16) - @staticmethod def _sanitize_extra_roles(extra_roles: str | None) -> list[str]: """Standardize and sanitize user extra-roles.""" @@ -105,7 +94,8 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: if ( key != self.charm.app and key != self.charm.unit - and self.charm._peers.data[key].get("user_hash", "") != self.generate_user_hash + and self.charm._peers.data[key].get("user_hash", "") + != self.charm.generate_user_hash ): logger.debug("Not all units have synced configuration") event.defer() From f80167ee70a52516d5bbd13eb81610edfc7d50ef Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Sat, 19 Jul 2025 01:19:29 +0300 Subject: [PATCH 32/37] Conditional hash update --- src/charm.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/charm.py b/src/charm.py index 34bae7f855..c3d9dfd21e 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2120,8 +2120,12 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() - self.unit_peer_data.update({"user_hash": self.generate_user_hash}) - if self.unit.is_leader(): + if self.unit_peer_data.get("user_hash") != self.generate_user_hash: + self.unit_peer_data.update({"user_hash": self.generate_user_hash}) + if ( + self.unit.is_leader() + and self.app_peer_data.get("user_hash") != self.generate_user_hash + ): self.app_peer_data.update({"user_hash": self.generate_user_hash}) return True From 6844c7d83b94ac2436d3a1d996f89c62f526cd65 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Sat, 19 Jul 2025 15:41:50 +0300 Subject: [PATCH 33/37] Try to sort keys --- src/charm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/charm.py b/src/charm.py index c3d9dfd21e..bc83b43a9b 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2339,7 +2339,9 @@ def relations_user_databases_map(self) -> dict: @property def generate_user_hash(self) -> str: """Generate expected user and database hash.""" - return shake_128(str(self.relations_user_databases_map).encode()).hexdigest(16) + return shake_128( + str(sorted(self.relations_user_databases_map.items())).encode() + ).hexdigest(16) def override_patroni_on_failure_condition( self, new_condition: str, repeat_cause: str | None From 44615bc0bff0679f2fed72cd4c4b0a1f8daae39a Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Sat, 
19 Jul 2025 22:14:27 +0300 Subject: [PATCH 34/37] Revert to relation user hash --- src/charm.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/charm.py b/src/charm.py index bc83b43a9b..ee05c7cac9 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2339,9 +2339,14 @@ def relations_user_databases_map(self) -> dict: @property def generate_user_hash(self) -> str: """Generate expected user and database hash.""" - return shake_128( - str(sorted(self.relations_user_databases_map.items())).encode() - ).hexdigest(16) + user_db_pairs = {} + for relation in self.model.relations[self.postgresql_client_relation.relation_name]: + if database := self.postgresql_client_relation.database_provides.fetch_relation_field( + relation.id, "database" + ): + user = f"relation_id_{relation.id}" + user_db_pairs[user] = database + return shake_128(str(user_db_pairs).encode()).hexdigest(16) def override_patroni_on_failure_condition( self, new_condition: str, repeat_cause: str | None From cec1ec06958e39e05568fb7439cae1bde2b12997 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Mon, 21 Jul 2025 16:57:39 +0300 Subject: [PATCH 35/37] Try to reduce the amount of ifs --- src/charm.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/charm.py b/src/charm.py index ee05c7cac9..b9d196ac11 100755 --- a/src/charm.py +++ b/src/charm.py @@ -2120,12 +2120,8 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() - if self.unit_peer_data.get("user_hash") != self.generate_user_hash: - self.unit_peer_data.update({"user_hash": self.generate_user_hash}) - if ( - self.unit.is_leader() - and self.app_peer_data.get("user_hash") != self.generate_user_hash - ): + self.unit_peer_data.update({"user_hash": self.generate_user_hash}) + if self.unit.is_leader(): self.app_peer_data.update({"user_hash": self.generate_user_hash}) return True From 636c16f6241087606e1dcb47eaa62946edff2718 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 22 Jul 2025 21:54:26 +0300 Subject: [PATCH 36/37] Remove trigger --- lib/charms/postgresql_k8s/v0/postgresql.py | 79 +------------------ scripts/authorisation_rules_observer.py | 75 +++++++++++++----- src/authorisation_rules_observer.py | 24 +++++- src/charm.py | 14 ++-- tests/integration/test_pg_hba.py | 2 +- .../unit/test_authorisation_rules_observer.py | 53 +++++++++++++ 6 files changed, 136 insertions(+), 111 deletions(-) create mode 100644 tests/unit/test_authorisation_rules_observer.py diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 54453a3623..7ba643aadb 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 53 +LIBPATCH = 54 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -782,83 +782,6 @@ def set_up_database(self) -> None: connection = None cursor = None try: - with self._connect_to_database( - database="template1" - ) as connection, connection.cursor() as cursor: - # Create database function and event trigger to identify users created by PgBouncer. 
- cursor.execute( - "SELECT TRUE FROM pg_event_trigger WHERE evtname = 'update_pg_hba_on_create_schema';" - ) - if cursor.fetchone() is None: - cursor.execute(""" -CREATE OR REPLACE FUNCTION update_pg_hba() - RETURNS event_trigger - LANGUAGE plpgsql - AS $$ - DECLARE - hba_file TEXT; - copy_command TEXT; - connection_type TEXT; - rec record; - insert_value TEXT; - changes INTEGER = 0; - BEGIN - -- Don't execute on replicas. - IF NOT pg_is_in_recovery() THEN - -- Load the current authorisation rules. - DROP TABLE IF EXISTS pg_hba; - CREATE TEMPORARY TABLE pg_hba (lines TEXT); - SELECT setting INTO hba_file FROM pg_settings WHERE name = 'hba_file'; - IF hba_file IS NOT NULL THEN - copy_command='COPY pg_hba FROM ''' || hba_file || '''' ; - EXECUTE copy_command; - -- Build a list of the relation users and the databases they can access. - DROP TABLE IF EXISTS relation_users; - CREATE TEMPORARY TABLE relation_users AS - SELECT t.user, STRING_AGG(DISTINCT t.database, ',') AS databases FROM( SELECT u.usename AS user, CASE WHEN u.usesuper THEN 'all' ELSE d.datname END AS database FROM ( SELECT usename, usesuper FROM pg_catalog.pg_user WHERE usename NOT IN ('backup', 'monitoring', 'operator', 'postgres', 'replication', 'rewind')) AS u JOIN ( SELECT datname FROM pg_catalog.pg_database WHERE NOT datistemplate ) AS d ON has_database_privilege(u.usename, d.datname, 'CONNECT') ) AS t GROUP BY 1; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines LIKE 'hostssl %') > 0 THEN - connection_type := 'hostssl'; - ELSE - connection_type := 'host'; - END IF; - -- Add the new users to the pg_hba file. - FOR rec IN SELECT * FROM relation_users - LOOP - insert_value := connection_type || ' ' || rec.databases || ' ' || rec.user || ' 0.0.0.0/0 md5'; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines = insert_value) = 0 THEN - INSERT INTO pg_hba (lines) VALUES (insert_value); - changes := changes + 1; - END IF; - END LOOP; - -- Remove users that don't exist anymore from the pg_hba file. - FOR rec IN SELECT h.lines FROM pg_hba AS h LEFT JOIN relation_users AS r ON SPLIT_PART(h.lines, ' ', 3) = r.user WHERE r.user IS NULL AND (SPLIT_PART(h.lines, ' ', 3) LIKE 'relation_id_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE 'pgbouncer_auth_relation_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE '%_user_%_%') - LOOP - DELETE FROM pg_hba WHERE lines = rec.lines; - changes := changes + 1; - END LOOP; - -- Apply the changes to the pg_hba file. 
- IF changes > 0 THEN - copy_command='COPY pg_hba TO ''' || hba_file || '''' ; - EXECUTE copy_command; - PERFORM pg_reload_conf(); - END IF; - END IF; - END IF; - END; - $$; - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_create_schema - ON ddl_command_end - WHEN TAG IN ('CREATE SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_drop_schema - ON ddl_command_end - WHEN TAG IN ('DROP SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") if cursor.fetchone() is None: diff --git a/scripts/authorisation_rules_observer.py b/scripts/authorisation_rules_observer.py index c07f4003e3..e00a73e755 100644 --- a/scripts/authorisation_rules_observer.py +++ b/scripts/authorisation_rules_observer.py @@ -11,20 +11,66 @@ from urllib.parse import urljoin from urllib.request import urlopen +import psycopg2 +import yaml + API_REQUEST_TIMEOUT = 5 PATRONI_CLUSTER_STATUS_ENDPOINT = "cluster" PATRONI_CONFIG_STATUS_ENDPOINT = "config" +PATRONI_CONF_FILE_PATH = "/var/lib/postgresql/data/patroni.yml" + +# File path for the spawned cluster topology observer process to write logs. +LOG_FILE_PATH = "/var/log/authorisation_rules_observer.log" class UnreachableUnitsError(Exception): """Cannot reach any known cluster member.""" -def dispatch(run_cmd, unit, charm_dir): - """Use the input juju-run command to dispatch a :class:`AuthorisationRulesChangeEvent`.""" - dispatch_sub_cmd = "JUJU_DISPATCH_PATH=hooks/authorisation_rules_change {}/dispatch" +def dispatch(run_cmd, unit, charm_dir, custom_event): + """Use the input juju-run command to dispatch a custom event.""" + dispatch_sub_cmd = "JUJU_DISPATCH_PATH=hooks/{} {}/dispatch" # Input is generated by the charm - subprocess.run([run_cmd, "-u", unit, dispatch_sub_cmd.format(charm_dir)]) # noqa: S603 + subprocess.run([run_cmd, "-u", unit, dispatch_sub_cmd.format(custom_event, charm_dir)]) # noqa: S603 + + +def check_for_database_changes(run_cmd, unit, charm_dir, previous_databases): + """Check for changes in the databases. + + If changes are detected, dispatch an event to handle them. + """ + with open(PATRONI_CONF_FILE_PATH) as conf_file: + conf_file_contents = yaml.safe_load(conf_file) + password = conf_file_contents["postgresql"]["authentication"]["superuser"]["password"] + connection = None + try: + # Input is generated by the charm + with ( + psycopg2.connect( + f"dbname='postgres' user='operator' host='localhost'" + f"password='{password}' connect_timeout=1" + ) as connection, + connection.cursor() as cursor, + ): + cursor.execute("SELECT datname,datacl FROM pg_database;") + current_databases = cursor.fetchall() + except psycopg2.Error as e: + with open(LOG_FILE_PATH, "a") as log_file: + log_file.write(f"Failed to retrieve databases: {e}\n") + return previous_databases + else: + # If it's the first time the databases were retrieved, then store it and use + # it for subsequent checks. + if not previous_databases: + previous_databases = current_databases + # If the databases changed, dispatch a charm event to handle this change. 
+ elif current_databases != previous_databases: + previous_databases = current_databases + dispatch(run_cmd, unit, charm_dir, "databases_change") + return previous_databases + finally: + if connection: + connection.close() def main(): @@ -34,7 +80,7 @@ def main(): """ patroni_urls, run_cmd, unit, charm_dir = sys.argv[1:] - previous_authorisation_rules = [] + previous_databases = None urls = [urljoin(url, PATRONI_CLUSTER_STATUS_ENDPOINT) for url in patroni_urls.split(",")] member_name = unit.replace("/", "-") while True: @@ -67,22 +113,9 @@ def main(): break if is_primary: - # Read contents from the pg_hba.conf file. - with open("/var/lib/postgresql/data/pgdata/pg_hba.conf") as file: - current_authorisation_rules = file.read() - current_authorisation_rules = [ - line - for line in current_authorisation_rules.splitlines() - if not line.startswith("#") - ] - # If it's the first time the authorisation rules were retrieved, then store it and use - # it for subsequent checks. - if not previous_authorisation_rules: - previous_authorisation_rules = current_authorisation_rules - # If the authorisation rules changed, dispatch a charm event to handle this change. - elif current_authorisation_rules != previous_authorisation_rules: - previous_authorisation_rules = current_authorisation_rules - dispatch(run_cmd, unit, charm_dir) + previous_databases = check_for_database_changes( + run_cmd, unit, charm_dir, previous_databases + ) # Wait some time before checking again for a authorisation rules change. sleep(30) diff --git a/src/authorisation_rules_observer.py b/src/authorisation_rules_observer.py index 31fba0fed4..9751efcedd 100644 --- a/src/authorisation_rules_observer.py +++ b/src/authorisation_rules_observer.py @@ -8,6 +8,8 @@ import signal import subprocess import typing +from pathlib import Path +from sys import version_info from ops.charm import CharmEvents from ops.framework import EventBase, EventSource, Object @@ -22,17 +24,17 @@ LOG_FILE_PATH = "/var/log/authorisation_rules_observer.log" -class AuthorisationRulesChangeEvent(EventBase): - """A custom event for authorisation rules changes.""" +class DatabasesChangeEvent(EventBase): + """A custom event for databases changes.""" class AuthorisationRulesChangeCharmEvents(CharmEvents): """A CharmEvents extension for authorisation rules changes. - Includes :class:`AuthorisationRulesChangeEvent` in those that can be handled. + Includes :class:`DatabasesChangeEventt` in those that can be handled. """ - authorisation_rules_change = EventSource(AuthorisationRulesChangeEvent) + databases_change = EventSource(DatabasesChangeEvent) class AuthorisationRulesObserver(Object): @@ -74,6 +76,20 @@ def start_authorisation_rules_observer(self): # in a hook context, as Juju will disallow use of juju-run. new_env = os.environ.copy() new_env.pop("JUJU_CONTEXT_ID", None) + # Generate the venv path based on the existing lib path + for loc in new_env["PYTHONPATH"].split(":"): + path = Path(loc) + venv_path = ( + path + / ".." 
+ / "venv" + / "lib" + / f"python{version_info.major}.{version_info.minor}" + / "site-packages" + ) + if path.stem == "lib": + new_env["PYTHONPATH"] = f"{venv_path.resolve()}:{new_env['PYTHONPATH']}" + break urls = [ self._charm._patroni._patroni_url.replace(self._charm.endpoint, endpoint) diff --git a/src/charm.py b/src/charm.py index b9d196ac11..a71be17351 100755 --- a/src/charm.py +++ b/src/charm.py @@ -4,7 +4,6 @@ """Charmed Kubernetes Operator for the PostgreSQL database.""" -import datetime import itertools import json import logging @@ -13,6 +12,7 @@ import shutil import sys import time +from datetime import datetime from hashlib import shake_128 from pathlib import Path from typing import Literal, get_args @@ -222,9 +222,7 @@ def __init__(self, *args): "/usr/bin/juju-exec" if self.model.juju_version.major > 2 else "/usr/bin/juju-run" ) self._observer = AuthorisationRulesObserver(self, run_cmd) - self.framework.observe( - self.on.authorisation_rules_change, self._on_authorisation_rules_change - ) + self.framework.observe(self.on.databases_change, self._on_databases_change) self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.leader_elected, self._on_leader_elected) self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed) @@ -282,9 +280,11 @@ def __init__(self, *args): self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL] ) - def _on_authorisation_rules_change(self, _): - """Handle authorisation rules change event.""" - timestamp = datetime.datetime.now() + def _on_databases_change(self, _): + """Handle databases change event.""" + self.update_config() + logger.debug("databases changed") + timestamp = datetime.now() self._peers.data[self.unit].update({"pg_hba_needs_update_timestamp": str(timestamp)}) logger.debug(f"authorisation rules changed at {timestamp}") diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index 95cac54229..e9bef81561 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -99,7 +99,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): if connection: connection.close() - sleep(30) + sleep(60) for unit in ops_test.model.applications[DATABASE_APP_NAME].units: try: diff --git a/tests/unit/test_authorisation_rules_observer.py b/tests/unit/test_authorisation_rules_observer.py new file mode 100644 index 0000000000..d71ddedb07 --- /dev/null +++ b/tests/unit/test_authorisation_rules_observer.py @@ -0,0 +1,53 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +from unittest.mock import mock_open, patch, sentinel + +from scripts.authorisation_rules_observer import check_for_database_changes + + +def test_check_for_database_changes(): + with ( + patch("scripts.authorisation_rules_observer.subprocess") as _subprocess, + patch("scripts.authorisation_rules_observer.psycopg2") as _psycopg2, + ): + run_cmd = "run_cmd" + unit = "unit/0" + charm_dir = "charm_dir" + mock = mock_open( + read_data="""postgresql: + authentication: + superuser: + username: test_user + password: test_password""" + ) + with patch("builtins.open", mock, create=True): + _cursor = _psycopg2.connect.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value + _cursor.fetchall.return_value = sentinel.databases + + # Test the first time this function is called. 
+ result = check_for_database_changes(run_cmd, unit, charm_dir, None) + assert result == sentinel.databases + _subprocess.run.assert_not_called() + _psycopg2.connect.assert_called_once_with( + "dbname='postgres' user='operator' host='localhost'password='test_password' connect_timeout=1" + ) + _cursor.execute.assert_called_once_with("SELECT datname,datacl FROM pg_database;") + + # Test when the databases changed. + _cursor.fetchall.return_value = sentinel.databases_changed + result = check_for_database_changes(run_cmd, unit, charm_dir, result) + assert result == sentinel.databases_changed + + _subprocess.run.assert_called_once_with([ + run_cmd, + "-u", + unit, + f"JUJU_DISPATCH_PATH=hooks/databases_change {charm_dir}/dispatch", + ]) + + # Test when the databases haven't changed. + _subprocess.reset_mock() + check_for_database_changes(run_cmd, unit, charm_dir, result) + assert result == sentinel.databases_changed + _subprocess.run.assert_not_called() From d59b9ff335b89f68d63fe876518f280c2d68761a Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Mon, 28 Jul 2025 17:52:00 +0300 Subject: [PATCH 37/37] Blocked test app --- scripts/authorisation_rules_observer.py | 4 +-- .../ha_tests/test_async_replication.py | 18 +++++-------- .../integration/ha_tests/test_replication.py | 2 ++ .../ha_tests/test_self_healing_1.py | 1 + .../ha_tests/test_self_healing_2.py | 1 + tests/integration/ha_tests/test_upgrade.py | 1 + .../ha_tests/test_upgrade_from_stable.py | 1 + .../new_relations/test_new_relations_1.py | 1 - tests/integration/relations/test_relations.py | 23 ++++++++--------- tests/integration/test_db.py | 25 +++++++------------ .../unit/test_authorisation_rules_observer.py | 4 +-- 11 files changed, 35 insertions(+), 46 deletions(-) diff --git a/scripts/authorisation_rules_observer.py b/scripts/authorisation_rules_observer.py index e00a73e755..7a52c7ae6a 100644 --- a/scripts/authorisation_rules_observer.py +++ b/scripts/authorisation_rules_observer.py @@ -47,12 +47,12 @@ def check_for_database_changes(run_cmd, unit, charm_dir, previous_databases): # Input is generated by the charm with ( psycopg2.connect( - f"dbname='postgres' user='operator' host='localhost'" + "dbname='postgres' user='operator' host='localhost' " f"password='{password}' connect_timeout=1" ) as connection, connection.cursor() as cursor, ): - cursor.execute("SELECT datname,datacl FROM pg_database;") + cursor.execute("SELECT datname, datacl FROM pg_database;") current_databases = cursor.fetchall() except psycopg2.Error as e: with open(LOG_FILE_PATH, "a") as log_file: diff --git a/tests/integration/ha_tests/test_async_replication.py b/tests/integration/ha_tests/test_async_replication.py index 99e482409b..6685898505 100644 --- a/tests/integration/ha_tests/test_async_replication.py +++ b/tests/integration/ha_tests/test_async_replication.py @@ -117,18 +117,12 @@ async def test_deploy_async_replication_setup( async with ops_test.fast_forward(), fast_forward(second_model): await gather( - first_model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=TIMEOUT, - raise_on_error=False, - ), - second_model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=TIMEOUT, - raise_on_error=False, - ), + first_model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked"), + second_model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked"), + ) + await gather( + first_model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT), + 
second_model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT), ) diff --git a/tests/integration/ha_tests/test_replication.py b/tests/integration/ha_tests/test_replication.py index d17ba442f2..dc0ffcb757 100644 --- a/tests/integration/ha_tests/test_replication.py +++ b/tests/integration/ha_tests/test_replication.py @@ -10,6 +10,7 @@ from ..helpers import ( APPLICATION_NAME, CHARM_BASE, + DATABASE_APP_NAME, app_name, build_and_deploy, db_connect, @@ -47,6 +48,7 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) diff --git a/tests/integration/ha_tests/test_self_healing_1.py b/tests/integration/ha_tests/test_self_healing_1.py index 56c7ed1583..2696b3612c 100644 --- a/tests/integration/ha_tests/test_self_healing_1.py +++ b/tests/integration/ha_tests/test_self_healing_1.py @@ -72,6 +72,7 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") await ops_test.model.wait_for_idle( apps=[ APPLICATION_NAME, diff --git a/tests/integration/ha_tests/test_self_healing_2.py b/tests/integration/ha_tests/test_self_healing_2.py index 028fa3844c..f01acd7a9d 100644 --- a/tests/integration/ha_tests/test_self_healing_2.py +++ b/tests/integration/ha_tests/test_self_healing_2.py @@ -53,6 +53,7 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") await ops_test.model.wait_for_idle( apps=[ APPLICATION_NAME, diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py index e49a9b6dd0..03915bf5b1 100644 --- a/tests/integration/ha_tests/test_upgrade.py +++ b/tests/integration/ha_tests/test_upgrade.py @@ -53,6 +53,7 @@ async def test_deploy_latest(ops_test: OpsTest) -> None: base=CHARM_BASE, ), ) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") logger.info("Wait for applications to become active") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle( diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py index 46fa2850dc..d83a33e519 100644 --- a/tests/integration/ha_tests/test_upgrade_from_stable.py +++ b/tests/integration/ha_tests/test_upgrade_from_stable.py @@ -49,6 +49,7 @@ async def test_deploy_stable(ops_test: OpsTest) -> None: base=CHARM_BASE, ), ) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") logger.info("Wait for applications to become active") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle( diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index a7565fa7d1..ac26e09304 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -191,7 +191,6 @@ async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: Op base=CHARM_BASE, channel="edge", ) - await ops_test.model.wait_for_idle(apps=all_app_names, status="active") # Relate the new application with the database # and wait for them exchanging some connection data. 
diff --git a/tests/integration/relations/test_relations.py b/tests/integration/relations/test_relations.py index d345895b52..5a84bdfe6d 100644 --- a/tests/integration/relations/test_relations.py +++ b/tests/integration/relations/test_relations.py @@ -34,7 +34,7 @@ async def test_deploy_charms(ops_test: OpsTest, charm): application_name=APPLICATION_APP_NAME, num_units=1, base=CHARM_BASE, - channel="edge", + channel="latest/edge", ), ops_test.model.deploy( charm, @@ -55,18 +55,13 @@ async def test_deploy_charms(ops_test: OpsTest, charm): ), ) - await ops_test.model.wait_for_idle( - apps=[APP_NAME, APPLICATION_APP_NAME], status="active", timeout=3000 - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_APP_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=2000) async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.relate(APPLICATION_APP_NAME, f"{APP_NAME}:{DB_RELATION}") - await ops_test.model.wait_for_idle( - status="active", - timeout=1500, - raise_on_error=False, - ) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with modern endpoints") app = ops_test.model.applications[APP_NAME] @@ -81,7 +76,7 @@ async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with legacy endpoints") async with ops_test.fast_forward(): @@ -95,14 +90,16 @@ async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DATABASE_RELATION}", f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" remove relation with legacy endpoints") await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with modern endpoints") await ops_test.model.relate(APP_NAME, f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}") - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle( + apps=[APP_NAME, APPLICATION_APP_NAME], status="active", timeout=1500 + ) diff --git a/tests/integration/test_db.py b/tests/integration/test_db.py index 50b31ebd45..edb1f013ba 100644 --- a/tests/integration/test_db.py +++ b/tests/integration/test_db.py @@ -122,8 +122,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: ) await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", + apps=[APPLICATION_NAME, f"{APPLICATION_NAME}2"], + status="blocked", timeout=1000, ) @@ -166,11 +166,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await 
ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=2000, - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000) logger.info("Verifying that the charm unblocks when the extensions are enabled") config = {"plugin_pg_trgm_enable": "False", "plugin_unaccent_enable": "False"} @@ -179,7 +176,7 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" ) wait_for_relation_removed_between(ops_test, DATABASE_APP_NAME, APPLICATION_NAME) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") await ops_test.model.block_until( @@ -188,12 +185,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"} await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - raise_on_blocked=False, - timeout=2000, - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000) # removing relation to test roles await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" @@ -207,9 +200,9 @@ async def test_roles_blocking(ops_test: OpsTest) -> None: await ops_test.model.applications[APPLICATION_NAME].set_config(config) await ops_test.model.applications[f"{APPLICATION_NAME}2"].set_config(config) await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", + apps=[APPLICATION_NAME, f"{APPLICATION_NAME}2"], status="blocked" ) + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await gather( ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db"), diff --git a/tests/unit/test_authorisation_rules_observer.py b/tests/unit/test_authorisation_rules_observer.py index d71ddedb07..52d799c9d4 100644 --- a/tests/unit/test_authorisation_rules_observer.py +++ b/tests/unit/test_authorisation_rules_observer.py @@ -30,9 +30,9 @@ def test_check_for_database_changes(): assert result == sentinel.databases _subprocess.run.assert_not_called() _psycopg2.connect.assert_called_once_with( - "dbname='postgres' user='operator' host='localhost'password='test_password' connect_timeout=1" + "dbname='postgres' user='operator' host='localhost' password='test_password' connect_timeout=1" ) - _cursor.execute.assert_called_once_with("SELECT datname,datacl FROM pg_database;") + _cursor.execute.assert_called_once_with("SELECT datname, datacl FROM pg_database;") # Test when the databases changed. _cursor.fetchall.return_value = sentinel.databases_changed