diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 54453a3623..7ba643aadb 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 53 +LIBPATCH = 54 # Groups to distinguish HBA access ACCESS_GROUP_IDENTITY = "identity_access" @@ -782,83 +782,6 @@ def set_up_database(self) -> None: connection = None cursor = None try: - with self._connect_to_database( - database="template1" - ) as connection, connection.cursor() as cursor: - # Create database function and event trigger to identify users created by PgBouncer. - cursor.execute( - "SELECT TRUE FROM pg_event_trigger WHERE evtname = 'update_pg_hba_on_create_schema';" - ) - if cursor.fetchone() is None: - cursor.execute(""" -CREATE OR REPLACE FUNCTION update_pg_hba() - RETURNS event_trigger - LANGUAGE plpgsql - AS $$ - DECLARE - hba_file TEXT; - copy_command TEXT; - connection_type TEXT; - rec record; - insert_value TEXT; - changes INTEGER = 0; - BEGIN - -- Don't execute on replicas. - IF NOT pg_is_in_recovery() THEN - -- Load the current authorisation rules. - DROP TABLE IF EXISTS pg_hba; - CREATE TEMPORARY TABLE pg_hba (lines TEXT); - SELECT setting INTO hba_file FROM pg_settings WHERE name = 'hba_file'; - IF hba_file IS NOT NULL THEN - copy_command='COPY pg_hba FROM ''' || hba_file || '''' ; - EXECUTE copy_command; - -- Build a list of the relation users and the databases they can access. - DROP TABLE IF EXISTS relation_users; - CREATE TEMPORARY TABLE relation_users AS - SELECT t.user, STRING_AGG(DISTINCT t.database, ',') AS databases FROM( SELECT u.usename AS user, CASE WHEN u.usesuper THEN 'all' ELSE d.datname END AS database FROM ( SELECT usename, usesuper FROM pg_catalog.pg_user WHERE usename NOT IN ('backup', 'monitoring', 'operator', 'postgres', 'replication', 'rewind')) AS u JOIN ( SELECT datname FROM pg_catalog.pg_database WHERE NOT datistemplate ) AS d ON has_database_privilege(u.usename, d.datname, 'CONNECT') ) AS t GROUP BY 1; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines LIKE 'hostssl %') > 0 THEN - connection_type := 'hostssl'; - ELSE - connection_type := 'host'; - END IF; - -- Add the new users to the pg_hba file. - FOR rec IN SELECT * FROM relation_users - LOOP - insert_value := connection_type || ' ' || rec.databases || ' ' || rec.user || ' 0.0.0.0/0 md5'; - IF (SELECT COUNT(lines) FROM pg_hba WHERE lines = insert_value) = 0 THEN - INSERT INTO pg_hba (lines) VALUES (insert_value); - changes := changes + 1; - END IF; - END LOOP; - -- Remove users that don't exist anymore from the pg_hba file. - FOR rec IN SELECT h.lines FROM pg_hba AS h LEFT JOIN relation_users AS r ON SPLIT_PART(h.lines, ' ', 3) = r.user WHERE r.user IS NULL AND (SPLIT_PART(h.lines, ' ', 3) LIKE 'relation_id_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE 'pgbouncer_auth_relation_%' OR SPLIT_PART(h.lines, ' ', 3) LIKE '%_user_%_%') - LOOP - DELETE FROM pg_hba WHERE lines = rec.lines; - changes := changes + 1; - END LOOP; - -- Apply the changes to the pg_hba file. 
- IF changes > 0 THEN - copy_command='COPY pg_hba TO ''' || hba_file || '''' ; - EXECUTE copy_command; - PERFORM pg_reload_conf(); - END IF; - END IF; - END IF; - END; - $$; - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_create_schema - ON ddl_command_end - WHEN TAG IN ('CREATE SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) - cursor.execute(""" -CREATE EVENT TRIGGER update_pg_hba_on_drop_schema - ON ddl_command_end - WHEN TAG IN ('DROP SCHEMA') - EXECUTE FUNCTION update_pg_hba(); - """) with self._connect_to_database() as connection, connection.cursor() as cursor: cursor.execute("SELECT TRUE FROM pg_roles WHERE rolname='admin';") if cursor.fetchone() is None: diff --git a/scripts/authorisation_rules_observer.py b/scripts/authorisation_rules_observer.py index c07f4003e3..7a52c7ae6a 100644 --- a/scripts/authorisation_rules_observer.py +++ b/scripts/authorisation_rules_observer.py @@ -11,20 +11,66 @@ from urllib.parse import urljoin from urllib.request import urlopen +import psycopg2 +import yaml + API_REQUEST_TIMEOUT = 5 PATRONI_CLUSTER_STATUS_ENDPOINT = "cluster" PATRONI_CONFIG_STATUS_ENDPOINT = "config" +PATRONI_CONF_FILE_PATH = "/var/lib/postgresql/data/patroni.yml" + +# File path for the spawned cluster topology observer process to write logs. +LOG_FILE_PATH = "/var/log/authorisation_rules_observer.log" class UnreachableUnitsError(Exception): """Cannot reach any known cluster member.""" -def dispatch(run_cmd, unit, charm_dir): - """Use the input juju-run command to dispatch a :class:`AuthorisationRulesChangeEvent`.""" - dispatch_sub_cmd = "JUJU_DISPATCH_PATH=hooks/authorisation_rules_change {}/dispatch" +def dispatch(run_cmd, unit, charm_dir, custom_event): + """Use the input juju-run command to dispatch a custom event.""" + dispatch_sub_cmd = "JUJU_DISPATCH_PATH=hooks/{} {}/dispatch" # Input is generated by the charm - subprocess.run([run_cmd, "-u", unit, dispatch_sub_cmd.format(charm_dir)]) # noqa: S603 + subprocess.run([run_cmd, "-u", unit, dispatch_sub_cmd.format(custom_event, charm_dir)]) # noqa: S603 + + +def check_for_database_changes(run_cmd, unit, charm_dir, previous_databases): + """Check for changes in the databases. + + If changes are detected, dispatch an event to handle them. + """ + with open(PATRONI_CONF_FILE_PATH) as conf_file: + conf_file_contents = yaml.safe_load(conf_file) + password = conf_file_contents["postgresql"]["authentication"]["superuser"]["password"] + connection = None + try: + # Input is generated by the charm + with ( + psycopg2.connect( + "dbname='postgres' user='operator' host='localhost' " + f"password='{password}' connect_timeout=1" + ) as connection, + connection.cursor() as cursor, + ): + cursor.execute("SELECT datname, datacl FROM pg_database;") + current_databases = cursor.fetchall() + except psycopg2.Error as e: + with open(LOG_FILE_PATH, "a") as log_file: + log_file.write(f"Failed to retrieve databases: {e}\n") + return previous_databases + else: + # If it's the first time the databases were retrieved, then store it and use + # it for subsequent checks. + if not previous_databases: + previous_databases = current_databases + # If the databases changed, dispatch a charm event to handle this change. 
+        elif current_databases != previous_databases:
+            previous_databases = current_databases
+            dispatch(run_cmd, unit, charm_dir, "databases_change")
+        return previous_databases
+    finally:
+        if connection:
+            connection.close()
 
 
 def main():
@@ -34,7 +80,7 @@ def main():
     """
     patroni_urls, run_cmd, unit, charm_dir = sys.argv[1:]
 
-    previous_authorisation_rules = []
+    previous_databases = None
     urls = [urljoin(url, PATRONI_CLUSTER_STATUS_ENDPOINT) for url in patroni_urls.split(",")]
     member_name = unit.replace("/", "-")
     while True:
@@ -67,22 +113,9 @@ def main():
                 break
 
         if is_primary:
-            # Read contents from the pg_hba.conf file.
-            with open("/var/lib/postgresql/data/pgdata/pg_hba.conf") as file:
-                current_authorisation_rules = file.read()
-            current_authorisation_rules = [
-                line
-                for line in current_authorisation_rules.splitlines()
-                if not line.startswith("#")
-            ]
-            # If it's the first time the authorisation rules were retrieved, then store it and use
-            # it for subsequent checks.
-            if not previous_authorisation_rules:
-                previous_authorisation_rules = current_authorisation_rules
-            # If the authorisation rules changed, dispatch a charm event to handle this change.
-            elif current_authorisation_rules != previous_authorisation_rules:
-                previous_authorisation_rules = current_authorisation_rules
-                dispatch(run_cmd, unit, charm_dir)
+            previous_databases = check_for_database_changes(
+                run_cmd, unit, charm_dir, previous_databases
+            )
 
         # Wait some time before checking again for a authorisation rules change.
         sleep(30)
diff --git a/src/authorisation_rules_observer.py b/src/authorisation_rules_observer.py
index 31fba0fed4..9751efcedd 100644
--- a/src/authorisation_rules_observer.py
+++ b/src/authorisation_rules_observer.py
@@ -8,6 +8,8 @@
 import signal
 import subprocess
 import typing
+from pathlib import Path
+from sys import version_info
 
 from ops.charm import CharmEvents
 from ops.framework import EventBase, EventSource, Object
@@ -22,17 +24,17 @@
 LOG_FILE_PATH = "/var/log/authorisation_rules_observer.log"
 
 
-class AuthorisationRulesChangeEvent(EventBase):
-    """A custom event for authorisation rules changes."""
+class DatabasesChangeEvent(EventBase):
+    """A custom event for databases changes."""
 
 
 class AuthorisationRulesChangeCharmEvents(CharmEvents):
     """A CharmEvents extension for authorisation rules changes.
 
-    Includes :class:`AuthorisationRulesChangeEvent` in those that can be handled.
+    Includes :class:`DatabasesChangeEvent` in those that can be handled.
     """
 
-    authorisation_rules_change = EventSource(AuthorisationRulesChangeEvent)
+    databases_change = EventSource(DatabasesChangeEvent)
 
 
 class AuthorisationRulesObserver(Object):
@@ -74,6 +76,20 @@ def start_authorisation_rules_observer(self):
         # in a hook context, as Juju will disallow use of juju-run.
         new_env = os.environ.copy()
         new_env.pop("JUJU_CONTEXT_ID", None)
+        # Generate the venv path based on the existing lib path
+        for loc in new_env["PYTHONPATH"].split(":"):
+            path = Path(loc)
+            venv_path = (
+                path
+                / ".."
+ / "venv" + / "lib" + / f"python{version_info.major}.{version_info.minor}" + / "site-packages" + ) + if path.stem == "lib": + new_env["PYTHONPATH"] = f"{venv_path.resolve()}:{new_env['PYTHONPATH']}" + break urls = [ self._charm._patroni._patroni_url.replace(self._charm.endpoint, endpoint) diff --git a/src/charm.py b/src/charm.py index aad831c271..a71be17351 100755 --- a/src/charm.py +++ b/src/charm.py @@ -4,7 +4,6 @@ """Charmed Kubernetes Operator for the PostgreSQL database.""" -import datetime import itertools import json import logging @@ -13,6 +12,8 @@ import shutil import sys import time +from datetime import datetime +from hashlib import shake_128 from pathlib import Path from typing import Literal, get_args from urllib.parse import urlparse @@ -221,9 +222,7 @@ def __init__(self, *args): "/usr/bin/juju-exec" if self.model.juju_version.major > 2 else "/usr/bin/juju-run" ) self._observer = AuthorisationRulesObserver(self, run_cmd) - self.framework.observe( - self.on.authorisation_rules_change, self._on_authorisation_rules_change - ) + self.framework.observe(self.on.databases_change, self._on_databases_change) self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.leader_elected, self._on_leader_elected) self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed) @@ -281,9 +280,11 @@ def __init__(self, *args): self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL] ) - def _on_authorisation_rules_change(self, _): - """Handle authorisation rules change event.""" - timestamp = datetime.datetime.now() + def _on_databases_change(self, _): + """Handle databases change event.""" + self.update_config() + logger.debug("databases changed") + timestamp = datetime.now() self._peers.data[self.unit].update({"pg_hba_needs_update_timestamp": str(timestamp)}) logger.debug(f"authorisation rules changed at {timestamp}") @@ -580,14 +581,14 @@ def _on_peer_relation_changed(self, event: HookEvent) -> None: # noqa: C901 if self.unit.is_leader(): if self._initialize_cluster(event): logger.debug("Deferring on_peer_relation_changed: Leader initialized cluster") + event.defer() else: logger.debug("_initialized_cluster failed on _peer_relation_changed") return else: logger.debug( - "Deferring on_peer_relation_changed: Cluster must be initialized before members can join" + "Early exit on_peer_relation_changed: Cluster must be initialized before members can join" ) - event.defer() return # If the leader is the one receiving the event, it adds the new members, @@ -2119,6 +2120,9 @@ def update_config(self, is_creating_backup: bool = False) -> bool: self._restart_metrics_service() self._restart_ldap_sync_service() + self.unit_peer_data.update({"user_hash": self.generate_user_hash}) + if self.unit.is_leader(): + self.app_peer_data.update({"user_hash": self.generate_user_hash}) return True def _validate_config_options(self) -> None: @@ -2316,8 +2320,30 @@ def relations_user_databases_map(self) -> dict: user, current_host=self.is_connectivity_enabled ) ) + + # Copy relations users directly instead of waiting for them to be created + for relation in self.model.relations[self.postgresql_client_relation.relation_name]: + user = f"relation_id_{relation.id}" + if user not in user_database_map and ( + database := self.postgresql_client_relation.database_provides.fetch_relation_field( + relation.id, "database" + ) + ): + user_database_map[user] = database return user_database_map + @property + def generate_user_hash(self) -> str: + 
"""Generate expected user and database hash.""" + user_db_pairs = {} + for relation in self.model.relations[self.postgresql_client_relation.relation_name]: + if database := self.postgresql_client_relation.database_provides.fetch_relation_field( + relation.id, "database" + ): + user = f"relation_id_{relation.id}" + user_db_pairs[user] = database + return shake_128(str(user_db_pairs).encode()).hexdigest(16) + def override_patroni_on_failure_condition( self, new_condition: str, repeat_cause: str | None ) -> bool: diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index e2fb4f5c92..84ba570b3f 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -4,7 +4,6 @@ """Postgres client relation hooks & helpers.""" import logging -from datetime import datetime from charms.data_platform_libs.v0.data_interfaces import ( DatabaseProvides, @@ -22,12 +21,8 @@ from ops.charm import CharmBase, RelationBrokenEvent, RelationChangedEvent, RelationDepartedEvent from ops.framework import Object from ops.model import ActiveStatus, BlockedStatus, Relation -from tenacity import RetryError, Retrying, stop_after_attempt, wait_fixed -from constants import ( - DATABASE_PORT, - ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE, -) +from constants import DATABASE_PORT, ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE from utils import new_password logger = logging.getLogger(__name__) @@ -93,6 +88,19 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: event.defer() return + self.charm.update_config() + for key in self.charm._peers.data: + # We skip the leader so we don't have to wait on the defer + if ( + key != self.charm.app + and key != self.charm.unit + and self.charm._peers.data[key].get("user_hash", "") + != self.charm.generate_user_hash + ): + logger.debug("Not all units have synced configuration") + event.defer() + return + # Retrieve the database name and extra user roles using the charm library. 
database = event.database @@ -164,18 +172,6 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: ) return - # Try to wait for pg_hba trigger - try: - for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(1)): - with attempt: - if not self.charm.postgresql.is_user_in_hba(user): - raise Exception("pg_hba not ready") - self.charm.unit_peer_data.update({ - "pg_hba_needs_update_timestamp": str(datetime.now()) - }) - except RetryError: - logger.warning("database requested: Unable to check pg_hba rule update") - def _on_relation_departed(self, event: RelationDepartedEvent) -> None: """Set a flag to avoid deleting database users when not wanted.""" # Set a flag to avoid deleting database users when this unit diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index a719564c04..ae329e977f 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -919,14 +919,9 @@ async def start_continuous_writes(ops_test: OpsTest, app: str, model: Model = No ] if not relations: await model.relate(app, f"{APPLICATION_NAME}:database") - await model.wait_for_idle(status="active", timeout=1000) - else: - action = ( - await model.applications[APPLICATION_NAME] - .units[0] - .run_action("start-continuous-writes") + await model.wait_for_idle( + apps=[APPLICATION_NAME, app], status="active", timeout=1000, idle_period=30 ) - await action.wait() for attempt in Retrying(stop=stop_after_delay(60 * 5), wait=wait_fixed(3), reraise=True): with attempt: action = ( diff --git a/tests/integration/ha_tests/test_async_replication.py b/tests/integration/ha_tests/test_async_replication.py index 99e482409b..6685898505 100644 --- a/tests/integration/ha_tests/test_async_replication.py +++ b/tests/integration/ha_tests/test_async_replication.py @@ -117,18 +117,12 @@ async def test_deploy_async_replication_setup( async with ops_test.fast_forward(), fast_forward(second_model): await gather( - first_model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=TIMEOUT, - raise_on_error=False, - ), - second_model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=TIMEOUT, - raise_on_error=False, - ), + first_model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked"), + second_model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked"), + ) + await gather( + first_model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT), + second_model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT), ) diff --git a/tests/integration/ha_tests/test_replication.py b/tests/integration/ha_tests/test_replication.py index d17ba442f2..dc0ffcb757 100644 --- a/tests/integration/ha_tests/test_replication.py +++ b/tests/integration/ha_tests/test_replication.py @@ -10,6 +10,7 @@ from ..helpers import ( APPLICATION_NAME, CHARM_BASE, + DATABASE_APP_NAME, app_name, build_and_deploy, db_connect, @@ -47,6 +48,7 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) diff --git a/tests/integration/ha_tests/test_self_healing_1.py b/tests/integration/ha_tests/test_self_healing_1.py index f972303380..2696b3612c 100644 --- a/tests/integration/ha_tests/test_self_healing_1.py +++ 
b/tests/integration/ha_tests/test_self_healing_1.py @@ -12,6 +12,7 @@ from ..helpers import ( APPLICATION_NAME, CHARM_BASE, + DATABASE_APP_NAME, METADATA, app_name, build_and_deploy, @@ -71,8 +72,16 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: - async with ops_test.fast_forward(): - await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") + await ops_test.model.wait_for_idle( + apps=[ + APPLICATION_NAME, + DATABASE_APP_NAME, + ], + status="active", + timeout=1000, + idle_period=30, + ) @pytest.mark.abort_on_fail diff --git a/tests/integration/ha_tests/test_self_healing_2.py b/tests/integration/ha_tests/test_self_healing_2.py index 43b7d6a062..f01acd7a9d 100644 --- a/tests/integration/ha_tests/test_self_healing_2.py +++ b/tests/integration/ha_tests/test_self_healing_2.py @@ -8,6 +8,7 @@ from ..helpers import ( APPLICATION_NAME, CHARM_BASE, + DATABASE_APP_NAME, METADATA, app_name, build_and_deploy, @@ -52,8 +53,16 @@ async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: ) if wait_for_apps: - async with ops_test.fast_forward(): - await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") + await ops_test.model.wait_for_idle( + apps=[ + APPLICATION_NAME, + DATABASE_APP_NAME, + ], + status="active", + timeout=1000, + idle_period=30, + ) @pytest.mark.abort_on_fail diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py index e49a9b6dd0..03915bf5b1 100644 --- a/tests/integration/ha_tests/test_upgrade.py +++ b/tests/integration/ha_tests/test_upgrade.py @@ -53,6 +53,7 @@ async def test_deploy_latest(ops_test: OpsTest) -> None: base=CHARM_BASE, ), ) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") logger.info("Wait for applications to become active") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle( diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py index 46fa2850dc..d83a33e519 100644 --- a/tests/integration/ha_tests/test_upgrade_from_stable.py +++ b/tests/integration/ha_tests/test_upgrade_from_stable.py @@ -49,6 +49,7 @@ async def test_deploy_stable(ops_test: OpsTest) -> None: base=CHARM_BASE, ), ) + await ops_test.model.relate(DATABASE_APP_NAME, f"{APPLICATION_NAME}:database") logger.info("Wait for applications to become active") async with ops_test.fast_forward(): await ops_test.model.wait_for_idle( diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 4a94fbcfbf..ac26e09304 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -14,11 +14,7 @@ from constants import DATABASE_DEFAULT_NAME -from ..helpers import ( - CHARM_BASE, - check_database_users_existence, - scale_application, -) +from ..helpers import CHARM_BASE, check_database_users_existence, scale_application from .helpers import ( build_connection_string, get_application_relation_data, @@ -54,7 +50,8 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest, charm): application_name=APPLICATION_APP_NAME, num_units=2, base=CHARM_BASE, - channel="edge", + channel="latest/edge", + config={"extra_user_roles": 
"CREATEDB,CREATEROLE"}, ), ops_test.model.deploy( charm, @@ -194,7 +191,6 @@ async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: Op base=CHARM_BASE, channel="edge", ) - await ops_test.model.wait_for_idle(apps=all_app_names, status="active") # Relate the new application with the database # and wait for them exchanging some connection data. @@ -247,7 +243,7 @@ async def test_an_application_can_connect_to_multiple_database_clusters(ops_test f"{APPLICATION_APP_NAME}:{MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME}", ANOTHER_DATABASE_APP_NAME, ) - await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active") + await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", idle_period=30) # Retrieve the connection string to both database clusters using the relation aliases # and assert they are different. @@ -310,7 +306,9 @@ async def test_an_application_can_request_multiple_databases(ops_test: OpsTest): await ops_test.model.add_relation( f"{APPLICATION_APP_NAME}:{SECOND_DATABASE_RELATION_NAME}", DATABASE_APP_NAME ) - await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", timeout=15 * 60) + await ops_test.model.wait_for_idle( + apps=APP_NAMES, status="active", timeout=15 * 60, idle_period=30 + ) # Get the connection strings to connect to both databases. for attempt in Retrying(stop=stop_after_attempt(15), wait=wait_fixed(3), reraise=True): @@ -452,7 +450,9 @@ async def test_admin_role(ops_test: OpsTest): all_app_names = [DATA_INTEGRATOR_APP_NAME] all_app_names.extend(APP_NAMES) async with ops_test.fast_forward(): - await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, base=CHARM_BASE) + await ops_test.model.deploy( + DATA_INTEGRATOR_APP_NAME, channel="latest/edge", series="jammy" + ) await ops_test.model.wait_for_idle(apps=[DATA_INTEGRATOR_APP_NAME], status="blocked") await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ "database-name": DATA_INTEGRATOR_APP_NAME.replace("-", "_"), @@ -544,7 +544,8 @@ async def test_invalid_extra_user_roles(ops_test: OpsTest): await ops_test.model.deploy( DATA_INTEGRATOR_APP_NAME, application_name=another_data_integrator_app_name, - base=CHARM_BASE, + channel="latest/edge", + series="jammy", ) await ops_test.model.wait_for_idle( apps=[another_data_integrator_app_name], status="blocked" diff --git a/tests/integration/new_relations/test_relations_coherence.py b/tests/integration/new_relations/test_relations_coherence.py index 41b94afd41..7e444d9b2e 100644 --- a/tests/integration/new_relations/test_relations_coherence.py +++ b/tests/integration/new_relations/test_relations_coherence.py @@ -11,7 +11,7 @@ from constants import DATABASE_DEFAULT_NAME -from ..helpers import CHARM_BASE, DATABASE_APP_NAME, build_and_deploy +from ..helpers import DATABASE_APP_NAME, build_and_deploy from .helpers import build_connection_string from .test_new_relations_1 import DATA_INTEGRATOR_APP_NAME @@ -31,7 +31,7 @@ async def test_relations(ops_test: OpsTest, charm): await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=3000) # Creating first time relation with user role - await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, base=CHARM_BASE) + await ops_test.model.deploy(DATA_INTEGRATOR_APP_NAME, series="jammy") await ops_test.model.applications[DATA_INTEGRATOR_APP_NAME].set_config({ "database-name": DATA_INTEGRATOR_APP_NAME.replace("-", "_"), }) diff --git a/tests/integration/relations/test_relations.py b/tests/integration/relations/test_relations.py index d345895b52..5a84bdfe6d 100644 --- 
a/tests/integration/relations/test_relations.py +++ b/tests/integration/relations/test_relations.py @@ -34,7 +34,7 @@ async def test_deploy_charms(ops_test: OpsTest, charm): application_name=APPLICATION_APP_NAME, num_units=1, base=CHARM_BASE, - channel="edge", + channel="latest/edge", ), ops_test.model.deploy( charm, @@ -55,18 +55,13 @@ async def test_deploy_charms(ops_test: OpsTest, charm): ), ) - await ops_test.model.wait_for_idle( - apps=[APP_NAME, APPLICATION_APP_NAME], status="active", timeout=3000 - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_APP_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=2000) async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.relate(APPLICATION_APP_NAME, f"{APP_NAME}:{DB_RELATION}") - await ops_test.model.wait_for_idle( - status="active", - timeout=1500, - raise_on_error=False, - ) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with modern endpoints") app = ops_test.model.applications[APP_NAME] @@ -81,7 +76,7 @@ async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with legacy endpoints") async with ops_test.fast_forward(): @@ -95,14 +90,16 @@ async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DATABASE_RELATION}", f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" remove relation with legacy endpoints") await ops_test.model.applications[APP_NAME].destroy_relation( f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500) logger.info(" add relation with modern endpoints") await ops_test.model.relate(APP_NAME, f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}") - await ops_test.model.wait_for_idle(status="active", timeout=1500) + await ops_test.model.wait_for_idle( + apps=[APP_NAME, APPLICATION_APP_NAME], status="active", timeout=1500 + ) diff --git a/tests/integration/test_db.py b/tests/integration/test_db.py index 50b31ebd45..edb1f013ba 100644 --- a/tests/integration/test_db.py +++ b/tests/integration/test_db.py @@ -122,8 +122,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: ) await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", + apps=[APPLICATION_NAME, f"{APPLICATION_NAME}2"], + status="blocked", timeout=1000, ) @@ -166,11 +166,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - 
timeout=2000, - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000) logger.info("Verifying that the charm unblocks when the extensions are enabled") config = {"plugin_pg_trgm_enable": "False", "plugin_unaccent_enable": "False"} @@ -179,7 +176,7 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" ) wait_for_relation_removed_between(ops_test, DATABASE_APP_NAME, APPLICATION_NAME) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") await ops_test.model.block_until( @@ -188,12 +185,8 @@ async def test_extensions_blocking(ops_test: OpsTest) -> None: config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"} await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - raise_on_blocked=False, - timeout=2000, - ) + await ops_test.model.wait_for_idle(apps=[APPLICATION_NAME], status="blocked") + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000) # removing relation to test roles await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" @@ -207,9 +200,9 @@ async def test_roles_blocking(ops_test: OpsTest) -> None: await ops_test.model.applications[APPLICATION_NAME].set_config(config) await ops_test.model.applications[f"{APPLICATION_NAME}2"].set_config(config) await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", + apps=[APPLICATION_NAME, f"{APPLICATION_NAME}2"], status="blocked" ) + await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") await gather( ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db"), diff --git a/tests/integration/test_pg_hba.py b/tests/integration/test_pg_hba.py index dbcac32f82..e9bef81561 100644 --- a/tests/integration/test_pg_hba.py +++ b/tests/integration/test_pg_hba.py @@ -51,7 +51,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): await ops_test.model.add_relation(DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME) await ops_test.model.wait_for_idle( - apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active" + apps=[DATA_INTEGRATOR_APP_NAME, DATABASE_APP_NAME], status="active", timeout=1000 ) primary = await get_primary(ops_test) @@ -99,7 +99,7 @@ async def test_pg_hba(ops_test: OpsTest, charm): if connection: connection.close() - sleep(30) + sleep(60) for unit in ops_test.model.applications[DATABASE_APP_NAME].units: try: diff --git a/tests/unit/test_authorisation_rules_observer.py b/tests/unit/test_authorisation_rules_observer.py new file mode 100644 index 0000000000..52d799c9d4 --- /dev/null +++ b/tests/unit/test_authorisation_rules_observer.py @@ -0,0 +1,53 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +from unittest.mock import mock_open, patch, sentinel + +from scripts.authorisation_rules_observer import check_for_database_changes + + +def test_check_for_database_changes(): + with ( + patch("scripts.authorisation_rules_observer.subprocess") as _subprocess, + patch("scripts.authorisation_rules_observer.psycopg2") as _psycopg2, + ): + run_cmd = "run_cmd" + unit = "unit/0" + charm_dir = "charm_dir" + mock = mock_open( + read_data="""postgresql: + authentication: + superuser: + username: test_user + password: test_password""" + ) + with patch("builtins.open", mock, create=True): + _cursor = _psycopg2.connect.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value + _cursor.fetchall.return_value = sentinel.databases + + # Test the first time this function is called. + result = check_for_database_changes(run_cmd, unit, charm_dir, None) + assert result == sentinel.databases + _subprocess.run.assert_not_called() + _psycopg2.connect.assert_called_once_with( + "dbname='postgres' user='operator' host='localhost' password='test_password' connect_timeout=1" + ) + _cursor.execute.assert_called_once_with("SELECT datname, datacl FROM pg_database;") + + # Test when the databases changed. + _cursor.fetchall.return_value = sentinel.databases_changed + result = check_for_database_changes(run_cmd, unit, charm_dir, result) + assert result == sentinel.databases_changed + + _subprocess.run.assert_called_once_with([ + run_cmd, + "-u", + unit, + f"JUJU_DISPATCH_PATH=hooks/databases_change {charm_dir}/dispatch", + ]) + + # Test when the databases haven't changed. + _subprocess.reset_mock() + check_for_database_changes(run_cmd, unit, charm_dir, result) + assert result == sentinel.databases_changed + _subprocess.run.assert_not_called() diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index a624dab832..84759e94b3 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -1425,7 +1425,7 @@ def test_on_peer_relation_changed(harness): harness.set_can_connect(POSTGRESQL_CONTAINER, True) relation = harness.model.get_relation(PEER, rel_id) harness.charm.on.database_peers_relation_changed.emit(relation) - _defer.assert_called_once() + assert not _defer.called _add_members.assert_not_called() _update_config.assert_not_called() _coordinate_stanza_fields.assert_not_called()