diff --git a/config.yaml b/config.yaml
index 86da6cbc82..2cf3d4e3cb 100644
--- a/config.yaml
+++ b/config.yaml
@@ -69,6 +69,12 @@ options:
       Enable synchronized sequential scans.
     type: boolean
     default: true
+  ldap_map:
+    description: |
+      Comma-separated list of LDAP group to PostgreSQL group mappings.
+      The map is used to assign LDAP-synchronized users to PostgreSQL authorization groups.
+      Example: <ldap_group_1>=<psql_group_1>,<ldap_group_2>=<psql_group_2>
+    type: string
   ldap_search_filter:
     description: |
       The LDAP search filter to match users with.
diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py
index 41be98a04f..5975197f1b 100644
--- a/lib/charms/postgresql_k8s/v0/postgresql.py
+++ b/lib/charms/postgresql_k8s/v0/postgresql.py
@@ -35,7 +35,7 @@
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 47
+LIBPATCH = 48
 
 # Groups to distinguish HBA access
 ACCESS_GROUP_IDENTITY = "identity_access"
@@ -773,6 +773,42 @@ def is_restart_pending(self) -> bool:
             if connection:
                 connection.close()
 
+    @staticmethod
+    def build_postgresql_group_map(group_map: Optional[str]) -> List[Tuple[str, str]]:
+        """Build the PostgreSQL authorization group-map.
+
+        Args:
+            group_map: serialized group-map with the following format:
+                <ldap_group_1>=<psql_group_1>,
+                <ldap_group_2>=<psql_group_2>,
+                ...
+
+        Returns:
+            List of LDAP group to PostgreSQL group tuples.
+        """
+        if group_map is None:
+            return []
+
+        group_mappings = group_map.split(",")
+        group_mappings = (mapping.strip() for mapping in group_mappings)
+        group_map_list = []
+
+        for mapping in group_mappings:
+            mapping_parts = mapping.split("=")
+            if len(mapping_parts) != 2:
+                raise ValueError("The group-map must be a comma-separated list of key=value pairs")
+
+            ldap_group = mapping_parts[0]
+            psql_group = mapping_parts[1]
+
+            if psql_group in [*ACCESS_GROUPS, PERMISSIONS_GROUP_ADMIN]:
+                logger.warning(f"Tried to assign LDAP users to forbidden group: {psql_group}")
+                continue
+
+            group_map_list.append((ldap_group, psql_group))
+
+        return group_map_list
+
     @staticmethod
     def build_postgresql_parameters(
         config_options: dict, available_memory: int, limit_memory: Optional[int] = None
@@ -852,3 +888,34 @@ def validate_date_style(self, date_style: str) -> bool:
             return True
         except psycopg2.Error:
             return False
+
+    def validate_group_map(self, group_map: Optional[str]) -> bool:
+        """Validate the PostgreSQL authorization group-map.
+
+        Args:
+            group_map: serialized group-map with the following format:
+                <ldap_group_1>=<psql_group_1>,
+                <ldap_group_2>=<psql_group_2>,
+                ...
+
+        Returns:
+            Whether the group-map is valid.
+        """
+        if group_map is None:
+            return True
+
+        try:
+            group_map_list = self.build_postgresql_group_map(group_map)
+        except ValueError:
+            return False
+
+        for _, psql_group in group_map_list:
+            with self._connect_to_database() as connection, connection.cursor() as cursor:
+                query = SQL("SELECT TRUE FROM pg_roles WHERE rolname={};")
+                query = query.format(Literal(psql_group))
+                cursor.execute(query)
+
+                if cursor.fetchone() is None:
+                    return False
+
+        return True
diff --git a/src/backups.py b/src/backups.py
index 2bdc776374..d9a1fee86e 100644
--- a/src/backups.py
+++ b/src/backups.py
@@ -989,7 +989,7 @@ def _on_restore_action(self, event):  # noqa: C901
         # Stop the database service before performing the restore.
logger.info("Stopping database service") try: - self.container.stop(self.charm._postgresql_service) + self.container.stop(self.charm.postgresql_service) except ChangeError as e: error_message = f"Failed to stop database service with error: {e!s}" logger.error(f"Restore failed: {error_message}") @@ -1047,7 +1047,7 @@ def _on_restore_action(self, event): # noqa: C901 # Start the database to start the restore process. logger.info("Configuring Patroni to restore the backup") - self.container.start(self.charm._postgresql_service) + self.container.start(self.charm.postgresql_service) event.set_results({"restore-status": "restore started"}) @@ -1221,7 +1221,7 @@ def _restart_database(self) -> None: """Removes the restoring backup flag and restart the database.""" self.charm.app_peer_data.update({"restoring-backup": "", "restore-to-time": ""}) self.charm.update_config() - self.container.start(self.charm._postgresql_service) + self.container.start(self.charm.postgresql_service) def _retrieve_s3_parameters(self) -> tuple[dict, list[str]]: """Retrieve S3 parameters from the S3 integrator relation.""" diff --git a/src/charm.py b/src/charm.py index 563835255d..c2f1e5699c 100755 --- a/src/charm.py +++ b/src/charm.py @@ -14,6 +14,7 @@ import time from pathlib import Path from typing import Literal, get_args +from urllib.parse import urlparse # First platform-specific import, will fail on wrong architecture try: @@ -35,6 +36,7 @@ from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider from charms.loki_k8s.v1.loki_push_api import LogProxyConsumer from charms.postgresql_k8s.v0.postgresql import ( + ACCESS_GROUP_IDENTITY, ACCESS_GROUPS, REQUIRED_PLUGINS, PostgreSQL, @@ -88,6 +90,7 @@ APP_SCOPE, BACKUP_USER, DATABASE_DEFAULT_NAME, + DATABASE_PORT, METRICS_PORT, MONITORING_PASSWORD_KEY, MONITORING_USER, @@ -193,10 +196,11 @@ def __init__(self, *args): deleted_label=SECRET_DELETED_LABEL, ) - self._postgresql_service = "postgresql" + self.postgresql_service = "postgresql" self.rotate_logs_service = "rotate-logs" self.pgbackrest_server_service = "pgbackrest server" - self._metrics_service = "metrics_server" + self.ldap_sync_service = "ldap-sync" + self.metrics_service = "metrics_server" self._unit = self.model.unit.name self._name = self.model.app.name self._namespace = self.model.name @@ -586,7 +590,7 @@ def _on_peer_relation_changed(self, event: HookEvent) -> None: # noqa: C901 logger.debug("on_peer_relation_changed early exit: Unit in blocked status") return - services = container.pebble.get_services(names=[self._postgresql_service]) + services = container.pebble.get_services(names=[self.postgresql_service]) if ( (self.is_cluster_restoring_backup or self.is_cluster_restoring_to_time) and len(services) > 0 @@ -1463,7 +1467,7 @@ def _on_update_status(self, _) -> None: if not self._on_update_status_early_exit_checks(container): return - services = container.pebble.get_services(names=[self._postgresql_service]) + services = container.pebble.get_services(names=[self.postgresql_service]) if len(services) == 0: # Service has not been added nor started yet, so don't try to check Patroni API. 
logger.debug("on_update_status early exit: Service has not been added nor started yet") @@ -1476,10 +1480,10 @@ def _on_update_status(self, _) -> None: and services[0].current != ServiceStatus.ACTIVE ): logger.warning( - f"{self._postgresql_service} pebble service inactive, restarting service" + f"{self.postgresql_service} pebble service inactive, restarting service" ) try: - container.restart(self._postgresql_service) + container.restart(self.postgresql_service) except ChangeError: logger.exception("Failed to restart patroni") # If service doesn't recover fast, exit and wait for next hook run to re-check @@ -1576,7 +1580,7 @@ def _handle_processes_failures(self) -> bool: # https://github.com/canonical/pebble/issues/149 is resolved. if not self._patroni.member_started and self._patroni.is_database_running: try: - container.restart(self._postgresql_service) + container.restart(self.postgresql_service) logger.info("restarted Patroni because it was not running") except ChangeError: logger.error("failed to restart Patroni after checking that it was not running") @@ -1713,6 +1717,40 @@ def _update_endpoints( endpoints.remove(endpoint) self._peers.data[self.app]["endpoints"] = json.dumps(endpoints) + def _generate_ldap_service(self) -> dict: + """Generate the LDAP service definition.""" + ldap_params = self.get_ldap_parameters() + + ldap_url = urlparse(ldap_params["ldapurl"]) + ldap_host = ldap_url.hostname + ldap_port = ldap_url.port + + ldap_base_dn = ldap_params["ldapbasedn"] + ldap_bind_username = ldap_params["ldapbinddn"] + ldap_bing_password = ldap_params["ldapbindpasswd"] + ldap_group_mappings = self.postgresql.build_postgresql_group_map(self.config.ldap_map) + + return { + "override": "replace", + "summary": "synchronize LDAP users", + "command": "/start-ldap-synchronizer.sh", + "startup": "enabled", + "environment": { + "LDAP_HOST": ldap_host, + "LDAP_PORT": ldap_port, + "LDAP_BASE_DN": ldap_base_dn, + "LDAP_BIND_USERNAME": ldap_bind_username, + "LDAP_BIND_PASSWORD": ldap_bing_password, + "LDAP_GROUP_IDENTITY": json.dumps(ACCESS_GROUP_IDENTITY), + "LDAP_GROUP_MAPPINGS": json.dumps(ldap_group_mappings), + "POSTGRES_HOST": "127.0.0.1", + "POSTGRES_PORT": DATABASE_PORT, + "POSTGRES_DATABASE": DATABASE_DEFAULT_NAME, + "POSTGRES_USERNAME": USER, + "POSTGRES_PASSWORD": self.get_secret(APP_SCOPE, USER_PASSWORD_KEY), + }, + } + def _generate_metrics_service(self) -> dict: """Generate the metrics service definition.""" return { @@ -1724,7 +1762,7 @@ def _generate_metrics_service(self) -> dict: if self.get_secret("app", MONITORING_PASSWORD_KEY) is not None else "disabled" ), - "after": [self._postgresql_service], + "after": [self.postgresql_service], "user": WORKLOAD_OS_USER, "group": WORKLOAD_OS_GROUP, "environment": { @@ -1743,7 +1781,7 @@ def _postgresql_layer(self) -> Layer: "summary": "postgresql + patroni layer", "description": "pebble config layer for postgresql + patroni", "services": { - self._postgresql_service: { + self.postgresql_service: { "override": "replace", "summary": "entrypoint of the postgresql + patroni image", "command": f"patroni {self._storage_path}/patroni.yml", @@ -1773,7 +1811,13 @@ def _postgresql_layer(self) -> Layer: "user": WORKLOAD_OS_USER, "group": WORKLOAD_OS_GROUP, }, - self._metrics_service: self._generate_metrics_service(), + self.ldap_sync_service: { + "override": "replace", + "summary": "synchronize LDAP users", + "command": "/start-ldap-synchronizer.sh", + "startup": "disabled", + }, + self.metrics_service: self._generate_metrics_service(), 
                    self.rotate_logs_service: {
                        "override": "replace",
                        "summary": "rotate logs",
@@ -1782,7 +1826,7 @@
                 },
             },
             "checks": {
-                self._postgresql_service: {
+                self.postgresql_service: {
                     "override": "replace",
                     "level": "ready",
                     "http": {
@@ -1885,6 +1929,51 @@ def _restart(self, event: RunWithLock) -> None:
         # Start or stop the pgBackRest TLS server service when TLS certificate change.
         self.backup.start_stop_pgbackrest_service()
 
+    def _restart_metrics_service(self) -> None:
+        """Restart the monitoring service if the password was rotated."""
+        container = self.unit.get_container("postgresql")
+        current_layer = container.get_plan()
+
+        metrics_service = current_layer.services[self.metrics_service]
+        data_source_name = metrics_service.environment.get("DATA_SOURCE_NAME", "")
+
+        if metrics_service and not data_source_name.startswith(
+            f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
+        ):
+            container.add_layer(
+                self.metrics_service,
+                Layer({"services": {self.metrics_service: self._generate_metrics_service()}}),
+                combine=True,
+            )
+            container.restart(self.metrics_service)
+
+    def _restart_ldap_sync_service(self) -> None:
+        """Restart the LDAP sync service in case any configuration changed."""
+        if not self._patroni.member_started:
+            logger.debug("Restart LDAP sync early exit: Patroni has not started yet")
+            return
+
+        container = self.unit.get_container("postgresql")
+        sync_service = container.pebble.get_services(names=[self.ldap_sync_service])
+
+        if not self.is_primary and sync_service[0].is_running():
+            logger.debug("Stopping LDAP sync service. It must only run on the primary")
+            container.stop(self.ldap_sync_service)
+
+        if self.is_primary and not self.is_ldap_enabled:
+            logger.debug("Stopping LDAP sync service")
+            container.stop(self.ldap_sync_service)
+            return
+
+        if self.is_primary and self.is_ldap_enabled:
+            container.add_layer(
+                self.ldap_sync_service,
+                Layer({"services": {self.ldap_sync_service: self._generate_ldap_service()}}),
+                combine=True,
+            )
+            logger.debug("Starting LDAP sync service")
+            container.restart(self.ldap_sync_service)
+
     @property
     def _is_workload_running(self) -> bool:
         """Returns whether the workload is running (in an active state)."""
@@ -1892,7 +1981,7 @@ def _is_workload_running(self) -> bool:
         if not container.can_connect():
             return False
 
-        services = container.pebble.get_services(names=[self._postgresql_service])
+        services = container.pebble.get_services(names=[self.postgresql_service])
         if len(services) == 0:
             return False
 
@@ -1982,21 +2071,8 @@ def update_config(self, is_creating_backup: bool = False) -> bool:
         })
 
         self._handle_postgresql_restart_need()
-
-        # Restart the monitoring service if the password was rotated
-        container = self.unit.get_container("postgresql")
-        current_layer = container.get_plan()
-        if (
-            metrics_service := current_layer.services[self._metrics_service]
-        ) and not metrics_service.environment.get("DATA_SOURCE_NAME", "").startswith(
-            f"user={MONITORING_USER} password={self.get_secret('app', MONITORING_PASSWORD_KEY)} "
-        ):
-            container.add_layer(
-                self._metrics_service,
-                Layer({"services": {self._metrics_service: self._generate_metrics_service()}}),
-                combine=True,
-            )
-            container.restart(self._metrics_service)
+        self._restart_metrics_service()
+        self._restart_ldap_sync_service()
 
         return True
 
@@ -2010,6 +2086,9 @@ def _validate_config_options(self) -> None:
                 "instance_default_text_search_config config option has an invalid value"
             )
 
+        if not self.postgresql.validate_group_map(self.config.ldap_map):
+            raise ValueError("ldap_map config option has an invalid value")
+
         if not self.postgresql.validate_date_style(self.config.request_date_style):
             raise ValueError("request_date_style config option has an invalid value")
 
@@ -2081,14 +2160,14 @@ def _update_pebble_layers(self, replan: bool = True) -> None:
         # Check if there are any changes to layer services.
         if current_layer.services != new_layer.services:
             # Changes were made, add the new layer.
-            container.add_layer(self._postgresql_service, new_layer, combine=True)
+            container.add_layer(self.postgresql_service, new_layer, combine=True)
             logging.info("Added updated layer 'postgresql' to Pebble plan")
             if replan:
                 container.replan()
                 logging.info("Restarted postgresql service")
         if current_layer.checks != new_layer.checks:
             # Changes were made, add the new layer.
-            container.add_layer(self._postgresql_service, new_layer, combine=True)
+            container.add_layer(self.postgresql_service, new_layer, combine=True)
             logging.info("Updated health checks")
 
     def _unit_name_to_pod_name(self, unit_name: str) -> str:
diff --git a/src/config.py b/src/config.py
index 9e169efa95..9932a06d89 100644
--- a/src/config.py
+++ b/src/config.py
@@ -27,6 +27,7 @@ class CharmConfig(BaseConfigModel):
     instance_max_locks_per_transaction: int | None
     instance_password_encryption: str | None
     instance_synchronize_seqscans: bool | None
+    ldap_map: str | None
     ldap_search_filter: str | None
     logging_client_min_messages: str | None
     logging_log_connections: bool | None
diff --git a/src/relations/async_replication.py b/src/relations/async_replication.py
index 1700de12b8..9d34409de0 100644
--- a/src/relations/async_replication.py
+++ b/src/relations/async_replication.py
@@ -524,7 +524,7 @@ def _on_async_relation_changed(self, event: RelationChangedEvent) -> None:
 
         if (
             not self.container.can_connect()
-            or len(self.container.pebble.get_services(names=[self.charm._postgresql_service])) == 0
+            or len(self.container.pebble.get_services(names=[self.charm.postgresql_service])) == 0
         ):
             logger.debug("Early exit on_async_relation_changed: container hasn't started yet.")
             event.defer()
@@ -532,7 +532,7 @@ def _on_async_relation_changed(self, event: RelationChangedEvent) -> None:
 
         # Update the asynchronous replication configuration and start the database.
         self.charm.update_config()
-        self.container.start(self.charm._postgresql_service)
+        self.container.start(self.charm.postgresql_service)
 
         self._handle_database_start(event)
 
@@ -694,7 +694,7 @@ def _stop_database(self, event: RelationChangedEvent) -> bool:
             logger.debug("Early exit on_async_relation_changed: following promoted cluster.")
             return False
 
-        self.container.stop(self.charm._postgresql_service)
+        self.container.stop(self.charm.postgresql_service)
 
         if self.charm.unit.is_leader():
             # Remove the "cluster_initialised" flag to avoid self-healing in the update status hook.
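
For context before the test changes, here is a minimal sketch of the parsing rule that `build_postgresql_group_map` implements above. The group names are hypothetical, and the `identity_access` example relies on `ACCESS_GROUP_IDENTITY = "identity_access"` being one of the reserved `ACCESS_GROUPS`, which is the behavior the unit tests below exercise:

```python
from charms.postgresql_k8s.v0.postgresql import PostgreSQL

# Two well-formed pairs, each mapping an LDAP group onto a PostgreSQL role
# (all group names here are made up for illustration).
assert PostgreSQL.build_postgresql_group_map("engineers=developers,ops=dba") == [
    ("engineers", "developers"),
    ("ops", "dba"),
]

# Pairs targeting charm-internal groups are skipped with a warning.
assert PostgreSQL.build_postgresql_group_map("engineers=identity_access") == []

# Anything that is not a key=value pair raises ValueError; validate_group_map
# turns that into False, and _validate_config_options then rejects the config.
try:
    PostgreSQL.build_postgresql_group_map("engineers developers")
except ValueError:
    pass
```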
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index 2c0324b192..6991e03a2c 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -31,6 +31,7 @@
 POSTGRESQL_CONTAINER = "postgresql"
 POSTGRESQL_SERVICE = "postgresql"
+LDAP_SYNC_SERVICE = "ldap-sync"
 METRICS_SERVICE = "metrics_server"
 PGBACKREST_SERVER_SERVICE = "pgbackrest server"
 ROTATE_LOGS_SERVICE = "rotate-logs"
@@ -949,6 +950,20 @@ def test_postgresql_layer(harness):
                     "PATRONI_SUPERUSER_USERNAME": "operator",
                 },
             },
+            PGBACKREST_SERVER_SERVICE: {
+                "override": "replace",
+                "summary": "pgBackRest server",
+                "command": PGBACKREST_SERVER_SERVICE,
+                "startup": "disabled",
+                "user": "postgres",
+                "group": "postgres",
+            },
+            LDAP_SYNC_SERVICE: {
+                "override": "replace",
+                "summary": "synchronize LDAP users",
+                "command": "/start-ldap-synchronizer.sh",
+                "startup": "disabled",
+            },
             METRICS_SERVICE: {
                 "override": "replace",
                 "summary": "postgresql metrics exporter",
@@ -965,14 +980,6 @@ def test_postgresql_layer(harness):
                     ),
                 },
             },
-            PGBACKREST_SERVER_SERVICE: {
-                "override": "replace",
-                "summary": "pgBackRest server",
-                "command": PGBACKREST_SERVER_SERVICE,
-                "startup": "disabled",
-                "user": "postgres",
-                "group": "postgres",
-            },
             ROTATE_LOGS_SERVICE: {
                 "override": "replace",
                 "summary": "rotate logs",
@@ -1089,6 +1096,7 @@ def test_validate_config_options(harness):
         harness.set_can_connect(POSTGRESQL_CONTAINER, True)
         _charm_lib.return_value.get_postgresql_text_search_configs.return_value = []
         _charm_lib.return_value.validate_date_style.return_value = []
+        _charm_lib.return_value.validate_group_map.return_value = False
         _charm_lib.return_value.get_postgresql_timezones.return_value = []
 
         # Test instance_default_text_search_config exception
@@ -1106,6 +1114,17 @@ def test_validate_config_options(harness):
             "pg_catalog.test"
         ]
 
+        # Test ldap_map exception
+        with harness.hooks_disabled():
+            harness.update_config({"ldap_map": "ldap_group="})
+
+        with tc.assertRaises(ValueError) as e:
+            harness.charm._validate_config_options()
+            assert e.msg == "ldap_map config option has an invalid value"
+
+        _charm_lib.return_value.validate_group_map.assert_called_once_with("ldap_group=")
+        _charm_lib.return_value.validate_group_map.return_value = True
+
         # Test request_date_style exception
         with harness.hooks_disabled():
             harness.update_config({"request_date_style": "ISO, TEST"})
@@ -1128,10 +1147,6 @@
         _charm_lib.return_value.get_postgresql_timezones.assert_called_once_with()
         _charm_lib.return_value.get_postgresql_timezones.return_value = ["TEST_ZONE"]
 
-    #
-    # Secrets
-    #
-
 
 def test_scope_obj(harness):
     assert harness.charm._scope_obj("app") == harness.charm.framework.model.app
@@ -1552,6 +1567,12 @@ def test_update_config(harness):
         patch(
             "charm.PostgresqlOperatorCharm._handle_postgresql_restart_need"
         ) as _handle_postgresql_restart_need,
+        patch(
+            "charm.PostgresqlOperatorCharm._restart_metrics_service"
+        ) as _restart_metrics_service,
+        patch(
+            "charm.PostgresqlOperatorCharm._restart_ldap_sync_service"
+        ) as _restart_ldap_sync_service,
         patch("charm.Patroni.bulk_update_parameters_controller_by_patroni"),
         patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started,
         patch(
             "charm.PostgresqlOperatorCharm._is_workload_running", new_callable=PropertyMock
         ) as _is_workload_running,
         patch("charm.Patroni.render_patroni_yml_file") as _render_patroni_yml_file,
         patch("charm.PostgreSQLUpgrade") as _upgrade,
+        patch("charm.PostgresqlOperatorCharm.is_primary", return_value=False),
         patch(
"charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock ) as _is_tls_enabled, @@ -1593,10 +1615,14 @@ def test_update_config(harness): parameters={"test": "test"}, ) _handle_postgresql_restart_need.assert_called_once() + _restart_metrics_service.assert_called_once() + _restart_ldap_sync_service.assert_called_once() assert "tls" not in harness.get_relation_data(rel_id, harness.charm.unit.name) # Test with TLS files available. _handle_postgresql_restart_need.reset_mock() + _restart_metrics_service.reset_mock() + _restart_ldap_sync_service.reset_mock() harness.update_relation_data( rel_id, harness.charm.unit.name, {"tls": ""} ) # Mock some data in the relation to test that it change. @@ -1618,6 +1644,8 @@ def test_update_config(harness): parameters={"test": "test"}, ) _handle_postgresql_restart_need.assert_called_once() + _restart_metrics_service.assert_called_once() + _restart_ldap_sync_service.assert_called_once() assert "tls" not in harness.get_relation_data( rel_id, harness.charm.unit.name ) # The "tls" flag is set in handle_postgresql_restart_need. @@ -1627,8 +1655,12 @@ def test_update_config(harness): rel_id, harness.charm.unit.name, {"tls": ""} ) # Mock some data in the relation to test that it change. _handle_postgresql_restart_need.reset_mock() + _restart_metrics_service.reset_mock() + _restart_ldap_sync_service.reset_mock() harness.charm.update_config() _handle_postgresql_restart_need.assert_not_called() + _restart_metrics_service.assert_not_called() + _restart_ldap_sync_service.assert_not_called() assert harness.get_relation_data(rel_id, harness.charm.unit.name)["tls"] == "enabled" # Test with member not started yet. @@ -1638,6 +1670,8 @@ def test_update_config(harness): _is_tls_enabled.return_value = False harness.charm.update_config() _handle_postgresql_restart_need.assert_not_called() + _restart_metrics_service.assert_not_called() + _restart_ldap_sync_service.assert_not_called() assert "tls" not in harness.get_relation_data(rel_id, harness.charm.unit.name) diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py index e87f9ba370..d62baec568 100644 --- a/tests/unit/test_postgresql.py +++ b/tests/unit/test_postgresql.py @@ -370,6 +370,27 @@ def test_get_last_archived_wal(harness): execute.assert_called_once_with("SELECT last_archived_wal FROM pg_stat_archiver;") +def test_build_postgresql_group_map(harness): + assert harness.charm.postgresql.build_postgresql_group_map(None) == [] + assert harness.charm.postgresql.build_postgresql_group_map("ldap_group=admin") == [] + + for group in ACCESS_GROUPS: + assert harness.charm.postgresql.build_postgresql_group_map(f"ldap_group={group}") == [] + + mapping_1 = "ldap_group_1=psql_group_1" + mapping_2 = "ldap_group_2=psql_group_2" + + assert harness.charm.postgresql.build_postgresql_group_map(f"{mapping_1},{mapping_2}") == [ + ("ldap_group_1", "psql_group_1"), + ("ldap_group_2", "psql_group_2"), + ] + try: + harness.charm.postgresql.build_postgresql_group_map(f"{mapping_1} {mapping_2}") + assert False + except ValueError: + assert True + + def test_build_postgresql_parameters(harness): # Test when not limit is imposed to the available memory. 
    config_options = {
@@ -463,3 +484,30 @@ def test_configure_pgaudit(harness):
         call("ALTER SYSTEM RESET pgaudit.log_parameter;"),
         call("SELECT pg_reload_conf();"),
     ])
+
+
+def test_validate_group_map(harness):
+    with patch(
+        "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database"
+    ) as _connect_to_database:
+        execute = _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.execute
+        _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchone.return_value = None
+
+        query = SQL("SELECT TRUE FROM pg_roles WHERE rolname={};")
+
+        assert harness.charm.postgresql.validate_group_map(None) is True
+
+        assert harness.charm.postgresql.validate_group_map("") is False
+        assert harness.charm.postgresql.validate_group_map("ldap_group=") is False
+        execute.assert_has_calls([
+            call(query.format(Literal(""))),
+        ])
+
+        assert harness.charm.postgresql.validate_group_map("ldap_group=admin") is True
+        assert harness.charm.postgresql.validate_group_map("ldap_group=admin,") is False
+        assert harness.charm.postgresql.validate_group_map("ldap_group admin") is False
+
+        assert harness.charm.postgresql.validate_group_map("ldap_group=missing_group") is False
+        execute.assert_has_calls([
+            call(query.format(Literal("missing_group"))),
+        ])
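
As a usage reference, here is a small runnable sketch of how `_generate_ldap_service` derives the sync service's connection settings from the LDAP relation data. The URL, DNs, and password are made-up values; the only assumptions carried over from the diff are the four `ldap*` keys read from `get_ldap_parameters()` and the JSON encoding of the group mappings:

```python
import json
from urllib.parse import urlparse

# Hypothetical relation data, shaped like the mapping returned by
# get_ldap_parameters() and consumed by _generate_ldap_service().
ldap_params = {
    "ldapurl": "ldap://ldap.example.com:3893",
    "ldapbasedn": "dc=example,dc=com",
    "ldapbinddn": "cn=bind,dc=example,dc=com",
    "ldapbindpasswd": "not-a-real-password",
}

# The service splits the URL into LDAP_HOST and LDAP_PORT with urlparse.
ldap_url = urlparse(ldap_params["ldapurl"])
assert ldap_url.hostname == "ldap.example.com"
assert ldap_url.port == 3893

# Group mappings are JSON-encoded before landing in the service environment,
# mirroring the LDAP_GROUP_MAPPINGS entry in _generate_ldap_service().
assert json.dumps([("engineers", "developers")]) == '[["engineers", "developers"]]'
```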