From a7c1ea6c4ad73dd37fe3f226fbe920c59d2fb7cc Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 27 Feb 2025 14:02:03 +0200 Subject: [PATCH 01/12] Bump libs --- lib/charms/grafana_agent/v0/cos_agent.py | 9 ++++++++- lib/charms/rolling_ops/v0/rollingops.py | 8 ++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/charms/grafana_agent/v0/cos_agent.py b/lib/charms/grafana_agent/v0/cos_agent.py index f1344c06ac..b18c271342 100644 --- a/lib/charms/grafana_agent/v0/cos_agent.py +++ b/lib/charms/grafana_agent/v0/cos_agent.py @@ -254,7 +254,7 @@ class _MetricsEndpointDict(TypedDict): LIBID = "dc15fa84cef84ce58155fb84f6c6213a" LIBAPI = 0 -LIBPATCH = 19 +LIBPATCH = 20 PYDEPS = ["cosl >= 0.0.50", "pydantic"] @@ -758,6 +758,13 @@ def _dashboards(self) -> List[str]: # because there is currently no other way to communicate the dashboard path separately. # https://github.com/canonical/grafana-k8s-operator/pull/363 dashboard["uid"] = DashboardPath40UID.generate(self._charm.meta.name, rel_path) + + # Add tags + tags: List[str] = dashboard.get("tags", []) + if not any(tag.startswith("charm: ") for tag in tags): + tags.append(f"charm: {self._charm.meta.name}") + dashboard["tags"] = tags + dashboards.append(LZMABase64.compress(json.dumps(dashboard))) return dashboards diff --git a/lib/charms/rolling_ops/v0/rollingops.py b/lib/charms/rolling_ops/v0/rollingops.py index 57aa9bf352..13b51a3051 100644 --- a/lib/charms/rolling_ops/v0/rollingops.py +++ b/lib/charms/rolling_ops/v0/rollingops.py @@ -63,13 +63,14 @@ def _on_trigger_restart(self, event): juju run-action some-charm/0 some-charm/1 <... some-charm/n> restart ``` -Note that all units that plan to restart must receive the action and emit the aquire +Note that all units that plan to restart must receive the action and emit the acquire event. Any units that do not run their acquire handler will be left out of the rolling restart. 
(An operator might take advantage of this fact to recover from a failed rolling operation without restarting workloads that were able to successfully restart -- simply omit the successful units from a subsequent run-action call.) """ + import logging from enum import Enum from typing import AnyStr, Callable, Optional @@ -88,7 +89,7 @@ def _on_trigger_restart(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 class LockNoRelationError(Exception): @@ -149,7 +150,6 @@ class Lock: """ def __init__(self, manager, unit=None): - self.relation = manager.model.relations[manager.name][0] if not self.relation: # TODO: defer caller in this case (probably just fired too soon). @@ -246,7 +246,7 @@ def __init__(self, manager): # Gather all the units. relation = manager.model.relations[manager.name][0] - units = [unit for unit in relation.units] + units = list(relation.units) # Plus our unit ... 
units.append(manager.model.unit) From 19817d1a7cda8b7a0861cbea6ee98e7b39373edf Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 27 Feb 2025 14:48:45 +0200 Subject: [PATCH 02/12] Add readonly URIs --- .../data_platform_libs/v0/data_interfaces.py | 27 +++++++++++++++++-- src/relations/postgresql_provider.py | 11 +++++++- .../new_relations/test_new_relations_1.py | 4 +-- tests/unit/test_postgresql_provider.py | 13 +++++++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index 9717119030..e4cf7a2cb5 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 41 +LIBPATCH = 42 PYDEPS = ["ops>=2.0.0"] @@ -960,6 +960,7 @@ class Data(ABC): "username": SECRET_GROUPS.USER, "password": SECRET_GROUPS.USER, "uris": SECRET_GROUPS.USER, + "read-only-uris": SECRET_GROUPS.USER, "tls": SECRET_GROUPS.TLS, "tls-ca": SECRET_GROUPS.TLS, } @@ -1700,7 +1701,7 @@ def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: class RequirerData(Data): """Requirer-side of the relation.""" - SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris", "read-only-uris"] def __init__( self, @@ -2749,6 +2750,19 @@ def uris(self) -> Optional[str]: return self.relation.data[self.relation.app].get("uris") + @property + def read_only_uris(self) -> Optional[str]: + """Returns the readonly connection URIs.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("read-only-uris") + + return 
self.relation.data[self.relation.app].get("read-only-uris") + @property def version(self) -> Optional[str]: """Returns the version of the database. @@ -2855,6 +2869,15 @@ def set_uris(self, relation_id: int, uris: str) -> None: """ self.update_relation_data(relation_id, {"uris": uris}) + def set_read_only_uris(self, relation_id: int, uris: str) -> None: + """Set the database readonly connection URIs in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self.update_relation_data(relation_id, {"read-only-uris": uris}) + def set_version(self, relation_id: int, version: str) -> None: """Set the database version in the application relation databag. diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 487c6fb9e5..c5113797ce 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -192,7 +192,12 @@ def update_endpoints(self, event: DatabaseRequestedEvent = None) -> None: read_only_endpoints = ( ",".join(f"{x}:{DATABASE_PORT}" for x in replicas_endpoint) if len(replicas_endpoint) > 0 - else "" + else f"{self.charm.primary_endpoint}:{DATABASE_PORT}" + ) + read_only_hosts = ( + ",".join(replicas_endpoint) + if len(replicas_endpoint) > 0 + else f"{self.charm.primary_endpoint}" ) tls = "True" if self.charm.is_tls_enabled else "False" @@ -225,6 +230,10 @@ def update_endpoints(self, event: DatabaseRequestedEvent = None) -> None: relation_id, f"postgresql://{user}:{password}@{self.charm.primary_endpoint}:{DATABASE_PORT}/{database}", ) + self.database_provides.set_read_only_uris( + relation_id, + f"postgresql://{user}:{password}@{read_only_hosts}:{DATABASE_PORT}/{database}", + ) self.database_provides.set_tls(relation_id, tls) self.database_provides.set_tls_ca(relation_id, ca) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 
7d051a0211..9e7b02b7a3 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -80,7 +80,7 @@ async def test_deploy_charms(ops_test: OpsTest, charm): await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", timeout=3000) -async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest): +async def test_primary_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest): """Test that there is no read-only endpoint in a standalone cluster.""" async with ops_test.fast_forward(): # Ensure the cluster starts with only one member. @@ -122,7 +122,7 @@ async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest): APPLICATION_APP_NAME, FIRST_DATABASE_RELATION_NAME, "read-only-endpoints", - exists=False, + exists=True, ) diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index 2aaed01083..42b3e9087d 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -256,6 +256,7 @@ def test_update_endpoints_with_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == {} @@ -266,7 +267,9 @@ def test_update_endpoints_with_event(harness): harness.charm.postgresql_client_relation.update_endpoints(mock_event) assert harness.get_relation_data(rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", + "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == {} @@ -337,12 +340,14 @@ def 
test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", + "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2:5432/test_db2", "tls": "False", } _fetch_my_relation_data.assert_called_once_with(None, ["password"]) @@ -354,12 +359,14 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", + "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2:5432/test_db2", "tls": "False", } @@ -370,12 +377,14 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432,3.3.3.3:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2,3.3.3.3:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432,3.3.3.3:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", + "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2,3.3.3.3:5432/test_db2", "tls": "False", } @@ -385,11 +394,15 @@ def 
test_update_endpoints_without_event(harness): harness.charm.postgresql_client_relation.update_endpoints() assert harness.get_relation_data(rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", + "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", + "read-only-uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", + "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", + "read-only-uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", "tls": "False", } From 530031b4fffdf92a1f02e8bfa891d688ddebec1a Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 27 Feb 2025 16:21:42 +0200 Subject: [PATCH 03/12] Check the actual endpoint --- .../new_relations/test_new_relations_1.py | 38 ++++++++++++++----- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 9e7b02b7a3..714680cbae 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -116,7 +116,7 @@ async def test_primary_read_only_endpoint_in_standalone_cluster(ops_test: OpsTes assert password is None # Try to get the connection string of the database using the read-only endpoint. - # It should not be available. + # It should be the primary. 
assert await check_relation_data_existence( ops_test, APPLICATION_APP_NAME, @@ -124,6 +124,16 @@ async def test_primary_read_only_endpoint_in_standalone_cluster(ops_test: OpsTes "read-only-endpoints", exists=True, ) + primary_unit = ops_test.model.applications[DATABASE_APP_NAME].units[0] + for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(3), reraise=True): + with attempt: + data = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + FIRST_DATABASE_RELATION_NAME, + "read-only-endpoints", + ) + assert data == f"{primary_unit.public_address}:5432" async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest): @@ -131,16 +141,24 @@ async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest): async with ops_test.fast_forward(): # Scale up the database. await scale_application(ops_test, DATABASE_APP_NAME, 2) + primary = await get_primary(ops_test, f"{DATABASE_APP_NAME}/0") + replica = next( + unit + for unit in ops_test.model.applications[DATABASE_APP_NAME].units + if unit.name != primary + ) # Try to get the connection string of the database using the read-only endpoint. - # It should be available again. - assert await check_relation_data_existence( - ops_test, - APPLICATION_APP_NAME, - FIRST_DATABASE_RELATION_NAME, - "read-only-endpoints", - exists=True, - ) + # It should be the replica unit. 
+ for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(3), reraise=True): + with attempt: + data = await get_application_relation_data( + ops_test, + APPLICATION_APP_NAME, + FIRST_DATABASE_RELATION_NAME, + "read-only-endpoints", + ) + assert data == f"{replica.public_address}:5432" async def test_database_relation_with_charm_libraries(ops_test: OpsTest): @@ -212,7 +230,7 @@ async def test_filter_out_degraded_replicas(ops_test: OpsTest): FIRST_DATABASE_RELATION_NAME, "read-only-endpoints", ) - assert data is None + assert data == f"{ops_test.model.units[primary].public_address}:5432" await start_machine(ops_test, machine) await ops_test.model.wait_for_idle( From 958f0704d29aab4d4600b5933d9efe256c99f4d5 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 28 Feb 2025 18:17:56 +0200 Subject: [PATCH 04/12] Remove check rel existence --- tests/integration/new_relations/helpers.py | 42 ------------------- .../new_relations/test_new_relations_1.py | 8 ---- 2 files changed, 50 deletions(-) diff --git a/tests/integration/new_relations/helpers.py b/tests/integration/new_relations/helpers.py index 3efaeac823..cd254b57b6 100644 --- a/tests/integration/new_relations/helpers.py +++ b/tests/integration/new_relations/helpers.py @@ -5,7 +5,6 @@ import yaml from pytest_operator.plugin import OpsTest -from tenacity import RetryError, Retrying, stop_after_attempt, wait_exponential async def get_juju_secret(ops_test: OpsTest, secret_uri: str) -> dict[str, str]: @@ -79,47 +78,6 @@ async def build_connection_string( return f"dbname='{database}' user='{username}' host='{host}' password='{password}' connect_timeout=10" -async def check_relation_data_existence( - ops_test: OpsTest, - application_name: str, - relation_name: str, - key: str, - exists: bool = True, -) -> bool: - """Checks for the existence of a key in the relation data. 
- - Args: - ops_test: The ops test framework instance - application_name: The name of the application - relation_name: Name of the relation to get relation data from - key: Key of data to be checked - exists: Whether to check for the existence or non-existence - - Returns: - whether the key exists in the relation data - """ - try: - # Retry mechanism used to wait for some events to be triggered, - # like the relation departed event. - for attempt in Retrying( - stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30) - ): - with attempt: - data = await get_application_relation_data( - ops_test, - application_name, - relation_name, - key, - ) - if exists: - assert data is not None - else: - assert data is None - return True - except RetryError: - return False - - async def get_alias_from_relation_data( ops_test: OpsTest, unit_name: str, related_unit_name: str ) -> str | None: diff --git a/tests/integration/new_relations/test_new_relations_1.py b/tests/integration/new_relations/test_new_relations_1.py index 714680cbae..e430b7919b 100644 --- a/tests/integration/new_relations/test_new_relations_1.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -27,7 +27,6 @@ from ..juju_ import juju_major_version from .helpers import ( build_connection_string, - check_relation_data_existence, get_application_relation_data, ) @@ -117,13 +116,6 @@ async def test_primary_read_only_endpoint_in_standalone_cluster(ops_test: OpsTes # Try to get the connection string of the database using the read-only endpoint. # It should be the primary. 
- assert await check_relation_data_existence( - ops_test, - APPLICATION_APP_NAME, - FIRST_DATABASE_RELATION_NAME, - "read-only-endpoints", - exists=True, - ) primary_unit = ops_test.model.applications[DATABASE_APP_NAME].units[0] for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(3), reraise=True): with attempt: From 53492b755724541224c5a2c1ef0dae74d404c4a2 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Tue, 4 Mar 2025 18:52:23 +0200 Subject: [PATCH 05/12] Conditional readonly uri --- src/relations/postgresql_provider.py | 14 ++++++++++---- tests/unit/test_postgresql_provider.py | 10 ---------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index c5113797ce..c556f230f5 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -230,10 +230,16 @@ def update_endpoints(self, event: DatabaseRequestedEvent = None) -> None: relation_id, f"postgresql://{user}:{password}@{self.charm.primary_endpoint}:{DATABASE_PORT}/{database}", ) - self.database_provides.set_read_only_uris( - relation_id, - f"postgresql://{user}:{password}@{read_only_hosts}:{DATABASE_PORT}/{database}", - ) + # Make sure that the URI will be a secret + if ( + secret_fields := self.database_provides.fetch_relation_field( + relation_id, "requested-secrets" + ) + ) and "read-only-uris" in secret_fields: + self.database_provides.set_read_only_uris( + relation_id, + f"postgresql://{user}:{password}@{read_only_hosts}:{DATABASE_PORT}/{database}", + ) self.database_provides.set_tls(relation_id, tls) self.database_provides.set_tls_ca(relation_id, ca) diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index 42b3e9087d..fb753e92eb 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -256,7 +256,6 @@ def test_update_endpoints_with_event(harness): "endpoints": "1.1.1.1:5432", 
"read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == {} @@ -269,7 +268,6 @@ def test_update_endpoints_with_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == {} @@ -340,14 +338,12 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", - "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2:5432/test_db2", "tls": "False", } _fetch_my_relation_data.assert_called_once_with(None, ["password"]) @@ -359,14 +355,12 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", - "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2:5432/test_db2", "tls": "False", } @@ 
-377,14 +371,12 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432,3.3.3.3:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@2.2.2.2,3.3.3.3:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "2.2.2.2:5432,3.3.3.3:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", - "read-only-uris": "postgresql://relation-3:test_password@2.2.2.2,3.3.3.3:5432/test_db2", "tls": "False", } @@ -396,13 +388,11 @@ def test_update_endpoints_without_event(harness): "endpoints": "1.1.1.1:5432", "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", - "read-only-uris": "postgresql://relation-2:test_password@1.1.1.1:5432/test_db", "tls": "False", } assert harness.get_relation_data(another_rel_id, harness.charm.app.name) == { "endpoints": "1.1.1.1:5432", "read-only-endpoints": "1.1.1.1:5432", "uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", - "read-only-uris": "postgresql://relation-3:test_password@1.1.1.1:5432/test_db2", "tls": "False", } From 1109902b1261ba92a91d6fe479742f8d4b49368d Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 5 Mar 2025 16:47:59 +0200 Subject: [PATCH 06/12] Bump libs --- lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index ebf80ede2e..e2208f756f 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -325,7 +325,10 @@ def _remove_stale_otel_sdk_packages(): SpanExportResult, ) from opentelemetry.sdk.trace.export.in_memory_span_exporter import 
InMemorySpanExporter -from opentelemetry.trace import INVALID_SPAN, Tracer +from opentelemetry.trace import ( + INVALID_SPAN, + Tracer, +) from opentelemetry.trace import get_current_span as otlp_get_current_span from opentelemetry.trace import ( get_tracer, @@ -345,7 +348,7 @@ def _remove_stale_otel_sdk_packages(): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 6 +LIBPATCH = 7 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] From 50b6b6ff31186df0fe18d780307c332f1782226a Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 13 Mar 2025 23:13:29 +0200 Subject: [PATCH 07/12] Check unit status --- tests/integration/ha_tests/helpers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index 951659ee45..05d9d2bacd 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -954,6 +954,8 @@ async def add_unit_with_storage(ops_test, app, storage): Note: this function exists as a temporary solution until this issue is resolved: https://github.com/juju/python-libjuju/issues/695 """ + for unit in ops_test.model.applications[app].units: + logger.error(f"{unit.name} {unit.agent_status} {unit.workload_status}") expected_units = len(ops_test.model.applications[app].units) + 1 prev_units = [unit.name for unit in ops_test.model.applications[app].units] model_name = ops_test.model.info.name From 5a84388bf8622e827aeea650442db60f6787efc9 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Thu, 13 Mar 2025 23:57:24 +0200 Subject: [PATCH 08/12] Outlive the libjuju cache --- tests/integration/ha_tests/helpers.py | 2 -- tests/integration/ha_tests/test_restore_cluster.py | 7 +++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index 05d9d2bacd..951659ee45 100644 --- 
a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -954,8 +954,6 @@ async def add_unit_with_storage(ops_test, app, storage): Note: this function exists as a temporary solution until this issue is resolved: https://github.com/juju/python-libjuju/issues/695 """ - for unit in ops_test.model.applications[app].units: - logger.error(f"{unit.name} {unit.agent_status} {unit.workload_status}") expected_units = len(ops_test.model.applications[app].units) + 1 prev_units = [unit.name for unit in ops_test.model.applications[app].units] model_name = ops_test.model.info.name diff --git a/tests/integration/ha_tests/test_restore_cluster.py b/tests/integration/ha_tests/test_restore_cluster.py index 8a26b15cb5..2336ee9161 100644 --- a/tests/integration/ha_tests/test_restore_cluster.py +++ b/tests/integration/ha_tests/test_restore_cluster.py @@ -5,6 +5,7 @@ import pytest from pytest_operator.plugin import OpsTest +from tenacity import Retrying, stop_after_delay, wait_fixed from ..helpers import ( CHARM_BASE, @@ -81,6 +82,12 @@ async def test_cluster_restore(ops_test): "CREATE TABLE IF NOT EXISTS restore_table_1 (test_collumn INT );" ) connection.close() + logger.info("Wait for libjuju cache to go away") + for attempt in Retrying(stop=stop_after_delay(60 * 5), wait=wait_fixed(3), reraise=True): + with attempt: + for unit in ops_test.model.applications[SECOND_APPLICATION].units: + logger.error(f"{unit.name} {unit.agent_status} {unit.workload_status}") + assert len(ops_test.model.applications[SECOND_APPLICATION].units) == 0 logger.info("Downscaling the existing cluster") storages = [] From e48cb6f579601f96b36e3123d0a01100d76ea3b2 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 14 Mar 2025 01:24:01 +0200 Subject: [PATCH 09/12] Use sets to figure out the new unit --- tests/integration/ha_tests/helpers.py | 14 +++++++------- tests/integration/ha_tests/test_restore_cluster.py | 7 ------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git 
a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index 951659ee45..d9ea25543d 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -954,21 +954,21 @@ async def add_unit_with_storage(ops_test, app, storage): Note: this function exists as a temporary solution until this issue is resolved: https://github.com/juju/python-libjuju/issues/695 """ - expected_units = len(ops_test.model.applications[app].units) + 1 - prev_units = [unit.name for unit in ops_test.model.applications[app].units] + original_units = {unit.name for unit in ops_test.model.applications[app].units} model_name = ops_test.model.info.name add_unit_cmd = f"add-unit {app} --model={model_name} --attach-storage={storage}".split() return_code, _, _ = await ops_test.juju(*add_unit_cmd) assert return_code == 0, "Failed to add unit with storage" async with ops_test.fast_forward(): await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=2000) - assert len(ops_test.model.applications[app].units) == expected_units, ( - "New unit not added to model" - ) + + # When removing all units sometimes the last unit remain in the list + current_units = {unit.name for unit in ops_test.model.applications[app].units} + original_units.intersection_update(current_units) + assert original_units.issubset(current_units), "New unit not added to model" # verify storage attached - curr_units = [unit.name for unit in ops_test.model.applications[app].units] - new_unit = next(unit for unit in set(curr_units) - set(prev_units)) + new_unit = (current_units - original_units).pop() assert storage_id(ops_test, new_unit) == storage, "unit added with incorrect storage" # return a reference to newly added unit diff --git a/tests/integration/ha_tests/test_restore_cluster.py b/tests/integration/ha_tests/test_restore_cluster.py index 2336ee9161..8a26b15cb5 100644 --- a/tests/integration/ha_tests/test_restore_cluster.py +++ 
b/tests/integration/ha_tests/test_restore_cluster.py @@ -5,7 +5,6 @@ import pytest from pytest_operator.plugin import OpsTest -from tenacity import Retrying, stop_after_delay, wait_fixed from ..helpers import ( CHARM_BASE, @@ -82,12 +81,6 @@ async def test_cluster_restore(ops_test): "CREATE TABLE IF NOT EXISTS restore_table_1 (test_collumn INT );" ) connection.close() - logger.info("Wait for libjuju cache to go away") - for attempt in Retrying(stop=stop_after_delay(60 * 5), wait=wait_fixed(3), reraise=True): - with attempt: - for unit in ops_test.model.applications[SECOND_APPLICATION].units: - logger.error(f"{unit.name} {unit.agent_status} {unit.workload_status}") - assert len(ops_test.model.applications[SECOND_APPLICATION].units) == 0 logger.info("Downscaling the existing cluster") storages = [] From 696c7f8e25a6d2a5505dc0a9a5405ce35eea8349 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Wed, 19 Mar 2025 16:19:08 +0200 Subject: [PATCH 10/12] Fix pitr aws test --- tests/integration/test_backups_pitr_aws.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_backups_pitr_aws.py b/tests/integration/test_backups_pitr_aws.py index 6220691ec9..2eaf9913e7 100644 --- a/tests/integration/test_backups_pitr_aws.py +++ b/tests/integration/test_backups_pitr_aws.py @@ -326,10 +326,10 @@ async def pitr_backup_operations( @pytest.mark.abort_on_fail async def test_pitr_backup_aws( - ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict], charm + ops_test: OpsTest, aws_cloud_configs: tuple[dict, dict], charm ) -> None: """Build, deploy two units of PostgreSQL and do backup in AWS. 
Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action.""" - config, credentials = gcp_cloud_configs + config, credentials = aws_cloud_configs await pitr_backup_operations( ops_test, From c8788590ecf7dba24de4c91fe8de345fdec9a6ca Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 21 Mar 2025 17:01:18 +0200 Subject: [PATCH 11/12] Clear up tls branches --- .../data_platform_libs/v0/data_interfaces.py | 2 +- lib/charms/postgresql_k8s/v0/postgresql.py | 148 +++++++++++++++++- tests/integration/test_backups_aws.py | 5 +- tests/integration/test_backups_ceph.py | 6 +- tests/integration/test_backups_gcp.py | 5 +- tests/integration/test_backups_pitr_aws.py | 5 +- tests/integration/test_backups_pitr_gcp.py | 5 +- tests/integration/test_tls.py | 5 +- 8 files changed, 161 insertions(+), 20 deletions(-) diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index e4cf7a2cb5..7fff3c4751 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -2369,7 +2369,7 @@ def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: self.secret_fields, fields, self._update_relation_secret, - data={field: self.deleted_label for field in fields}, + data=dict.fromkeys(fields, self.deleted_label), ) else: _, normal_fields = self._process_secret_fields( diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 8e2b7072ad..41be98a04f 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,19 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 45 +LIBPATCH = 47 + +# Groups to distinguish HBA access +ACCESS_GROUP_IDENTITY = "identity_access" +ACCESS_GROUP_INTERNAL = "internal_access" +ACCESS_GROUP_RELATION = 
"relation_access" + +# List of access groups to filter role assignments by +ACCESS_GROUPS = [ + ACCESS_GROUP_IDENTITY, + ACCESS_GROUP_INTERNAL, + ACCESS_GROUP_RELATION, +] # Groups to distinguish database permissions PERMISSIONS_GROUP_ADMIN = "admin" @@ -57,10 +69,18 @@ logger = logging.getLogger(__name__) +class PostgreSQLAssignGroupError(Exception): + """Exception raised when assigning to a group fails.""" + + class PostgreSQLCreateDatabaseError(Exception): """Exception raised when creating a database fails.""" +class PostgreSQLCreateGroupError(Exception): + """Exception raised when creating a group fails.""" + + class PostgreSQLCreateUserError(Exception): """Exception raised when creating a user fails.""" @@ -93,6 +113,10 @@ class PostgreSQLGetPostgreSQLVersionError(Exception): """Exception raised when retrieving PostgreSQL version fails.""" +class PostgreSQLListGroupsError(Exception): + """Exception raised when retrieving PostgreSQL groups list fails.""" + + class PostgreSQLListUsersError(Exception): """Exception raised when retrieving PostgreSQL users list fails.""" @@ -160,6 +184,24 @@ def _connect_to_database( connection.autocommit = True return connection + def create_access_groups(self) -> None: + """Create access groups to distinguish HBA authentication methods.""" + connection = None + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + for group in ACCESS_GROUPS: + cursor.execute( + SQL("CREATE ROLE {} NOLOGIN;").format( + Identifier(group), + ) + ) + except psycopg2.Error as e: + logger.error(f"Failed to create access groups: {e}") + raise PostgreSQLCreateGroupError() from e + finally: + if connection is not None: + connection.close() + def create_database( self, database: str, @@ -321,6 +363,50 @@ def delete_user(self, user: str) -> None: logger.error(f"Failed to delete user: {e}") raise PostgreSQLDeleteUserError() from e + def grant_internal_access_group_memberships(self) -> None: + """Grant membership to the 
internal access-group to existing internal users.""" + connection = None + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + for user in self.system_users: + cursor.execute( + SQL("GRANT {} TO {};").format( + Identifier(ACCESS_GROUP_INTERNAL), + Identifier(user), + ) + ) + except psycopg2.Error as e: + logger.error(f"Failed to grant internal access group memberships: {e}") + raise PostgreSQLAssignGroupError() from e + finally: + if connection is not None: + connection.close() + + def grant_relation_access_group_memberships(self) -> None: + """Grant membership to the relation access-group to existing relation users.""" + rel_users = self.list_users_from_relation() + if not rel_users: + return + + connection = None + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + rel_groups = SQL(",").join(Identifier(group) for group in [ACCESS_GROUP_RELATION]) + rel_users = SQL(",").join(Identifier(user) for user in rel_users) + + cursor.execute( + SQL("GRANT {groups} TO {users};").format( + groups=rel_groups, + users=rel_users, + ) + ) + except psycopg2.Error as e: + logger.error(f"Failed to grant relation access group memberships: {e}") + raise PostgreSQLAssignGroupError() from e + finally: + if connection is not None: + connection.close() + def enable_disable_extensions( self, extensions: Dict[str, bool], database: Optional[str] = None ) -> None: @@ -483,6 +569,19 @@ def get_postgresql_timezones(self) -> Set[str]: timezones = cursor.fetchall() return {timezone[0] for timezone in timezones} + def get_postgresql_default_table_access_methods(self) -> Set[str]: + """Returns the PostgreSQL available table access methods. + + Returns: + Set of PostgreSQL table access methods. 
+ """ + with self._connect_to_database( + database_host=self.current_host + ) as connection, connection.cursor() as cursor: + cursor.execute("SELECT amname FROM pg_am WHERE amtype = 't';") + access_methods = cursor.fetchall() + return {access_method[0] for access_method in access_methods} + def get_postgresql_version(self, current_host=True) -> str: """Returns the PostgreSQL version. @@ -521,6 +620,26 @@ def is_tls_enabled(self, check_current_host: bool = False) -> bool: # Connection errors happen when PostgreSQL has not started yet. return False + def list_access_groups(self) -> Set[str]: + """Returns the list of PostgreSQL database access groups. + + Returns: + List of PostgreSQL database access groups. + """ + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + cursor.execute( + "SELECT groname FROM pg_catalog.pg_group WHERE groname LIKE '%_access';" + ) + access_groups = cursor.fetchall() + return {group[0] for group in access_groups} + except psycopg2.Error as e: + logger.error(f"Failed to list PostgreSQL database access groups: {e}") + raise PostgreSQLListGroupsError() from e + finally: + if connection is not None: + connection.close() + def list_users(self) -> Set[str]: """Returns the list of PostgreSQL database users. @@ -535,6 +654,29 @@ def list_users(self) -> Set[str]: except psycopg2.Error as e: logger.error(f"Failed to list PostgreSQL database users: {e}") raise PostgreSQLListUsersError() from e + finally: + if connection is not None: + connection.close() + + def list_users_from_relation(self) -> Set[str]: + """Returns the list of PostgreSQL database users that were created by a relation. + + Returns: + List of PostgreSQL database users. 
+ """ + try: + with self._connect_to_database() as connection, connection.cursor() as cursor: + cursor.execute( + "SELECT usename FROM pg_catalog.pg_user WHERE usename LIKE 'relation_id_%';" + ) + usernames = cursor.fetchall() + return {username[0] for username in usernames} + except psycopg2.Error as e: + logger.error(f"Failed to list PostgreSQL database users: {e}") + raise PostgreSQLListUsersError() from e + finally: + if connection is not None: + connection.close() def list_valid_privileges_and_roles(self) -> Tuple[Set[str], Set[str]]: """Returns two sets with valid privileges and roles. @@ -653,6 +795,8 @@ def build_postgresql_parameters( for config, value in config_options.items(): # Filter config option not related to PostgreSQL parameters. if not config.startswith(( + "connection", + "cpu", "durability", "instance", "logging", @@ -660,6 +804,8 @@ def build_postgresql_parameters( "optimizer", "request", "response", + "session", + "storage", "vacuum", )): continue diff --git a/tests/integration/test_backups_aws.py b/tests/integration/test_backups_aws.py index 49ed3545a2..6b56226fa6 100644 --- a/tests/integration/test_backups_aws.py +++ b/tests/integration/test_backups_aws.py @@ -7,7 +7,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential -from . 
import architecture from .conftest import AWS from .helpers import ( DATABASE_APP_NAME, @@ -28,11 +27,11 @@ S3_INTEGRATOR_APP_NAME = "s3-integrator" if juju_major_version < 3: tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + tls_channel = "legacy/stable" tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + tls_channel = "latest/stable" tls_config = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) diff --git a/tests/integration/test_backups_ceph.py b/tests/integration/test_backups_ceph.py index bee798191a..7583fe80c6 100644 --- a/tests/integration/test_backups_ceph.py +++ b/tests/integration/test_backups_ceph.py @@ -14,7 +14,7 @@ import pytest from pytest_operator.plugin import OpsTest -from . import architecture, markers +from . 
import markers from .helpers import ( backup_operations, ) @@ -25,11 +25,11 @@ S3_INTEGRATOR_APP_NAME = "s3-integrator" if juju_major_version < 3: tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + tls_channel = "legacy/stable" tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + tls_channel = "latest/stable" tls_config = {"ca-common-name": "Test CA"} backup_id, value_before_backup, value_after_backup = "", None, None diff --git a/tests/integration/test_backups_gcp.py b/tests/integration/test_backups_gcp.py index d88fd894de..3ec8979ec9 100644 --- a/tests/integration/test_backups_gcp.py +++ b/tests/integration/test_backups_gcp.py @@ -8,7 +8,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential -from . 
import architecture from .conftest import GCP from .helpers import ( CHARM_BASE, @@ -28,11 +27,11 @@ S3_INTEGRATOR_APP_NAME = "s3-integrator" if juju_major_version < 3: tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + tls_channel = "legacy/stable" tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + tls_channel = "latest/stable" tls_config = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) diff --git a/tests/integration/test_backups_pitr_aws.py b/tests/integration/test_backups_pitr_aws.py index 2eaf9913e7..d835110179 100644 --- a/tests/integration/test_backups_pitr_aws.py +++ b/tests/integration/test_backups_pitr_aws.py @@ -7,7 +7,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential -from . 
import architecture from .conftest import AWS from .helpers import ( CHARM_BASE, @@ -23,11 +22,11 @@ S3_INTEGRATOR_APP_NAME = "s3-integrator" if juju_major_version < 3: TLS_CERTIFICATES_APP_NAME = "tls-certificates-operator" - TLS_CHANNEL = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + TLS_CHANNEL = "legacy/stable" TLS_CONFIG = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: TLS_CERTIFICATES_APP_NAME = "self-signed-certificates" - TLS_CHANNEL = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + TLS_CHANNEL = "latest/stable" TLS_CONFIG = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) diff --git a/tests/integration/test_backups_pitr_gcp.py b/tests/integration/test_backups_pitr_gcp.py index 4194bdd068..40b9e3a41f 100644 --- a/tests/integration/test_backups_pitr_gcp.py +++ b/tests/integration/test_backups_pitr_gcp.py @@ -7,7 +7,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential -from . 
import architecture from .conftest import GCP from .helpers import ( CHARM_BASE, @@ -23,11 +22,11 @@ S3_INTEGRATOR_APP_NAME = "s3-integrator" if juju_major_version < 3: TLS_CERTIFICATES_APP_NAME = "tls-certificates-operator" - TLS_CHANNEL = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + TLS_CHANNEL = "legacy/stable" TLS_CONFIG = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: TLS_CERTIFICATES_APP_NAME = "self-signed-certificates" - TLS_CHANNEL = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + TLS_CHANNEL = "latest/stable" TLS_CONFIG = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index 31053c4677..8a02a03755 100644 --- a/tests/integration/test_tls.py +++ b/tests/integration/test_tls.py @@ -7,7 +7,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, stop_after_delay, wait_exponential, wait_fixed -from . 
import architecture from .ha_tests.helpers import ( change_patroni_setting, ) @@ -33,11 +32,11 @@ APP_NAME = METADATA["name"] if juju_major_version < 3: tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" + tls_channel = "legacy/stable" tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} else: tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" + tls_channel = "latest/stable" tls_config = {"ca-common-name": "Test CA"} From 1a92b2ffdd6c216186369e1c6b05fd25924a0309 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 21 Mar 2025 17:35:31 +0200 Subject: [PATCH 12/12] Revert pg lib --- lib/charms/postgresql_k8s/v0/postgresql.py | 148 +-------------------- 1 file changed, 1 insertion(+), 147 deletions(-) diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index 41be98a04f..8e2b7072ad 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,19 +35,7 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 47 - -# Groups to distinguish HBA access -ACCESS_GROUP_IDENTITY = "identity_access" -ACCESS_GROUP_INTERNAL = "internal_access" -ACCESS_GROUP_RELATION = "relation_access" - -# List of access groups to filter role assignments by -ACCESS_GROUPS = [ - ACCESS_GROUP_IDENTITY, - ACCESS_GROUP_INTERNAL, - ACCESS_GROUP_RELATION, -] +LIBPATCH = 45 # Groups to distinguish database permissions PERMISSIONS_GROUP_ADMIN = "admin" @@ -69,18 +57,10 @@ logger = logging.getLogger(__name__) -class PostgreSQLAssignGroupError(Exception): - """Exception raised when assigning to a group fails.""" - - class PostgreSQLCreateDatabaseError(Exception): """Exception raised when creating a 
database fails.""" -class PostgreSQLCreateGroupError(Exception): - """Exception raised when creating a group fails.""" - - class PostgreSQLCreateUserError(Exception): """Exception raised when creating a user fails.""" @@ -113,10 +93,6 @@ class PostgreSQLGetPostgreSQLVersionError(Exception): """Exception raised when retrieving PostgreSQL version fails.""" -class PostgreSQLListGroupsError(Exception): - """Exception raised when retrieving PostgreSQL groups list fails.""" - - class PostgreSQLListUsersError(Exception): """Exception raised when retrieving PostgreSQL users list fails.""" @@ -184,24 +160,6 @@ def _connect_to_database( connection.autocommit = True return connection - def create_access_groups(self) -> None: - """Create access groups to distinguish HBA authentication methods.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - for group in ACCESS_GROUPS: - cursor.execute( - SQL("CREATE ROLE {} NOLOGIN;").format( - Identifier(group), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to create access groups: {e}") - raise PostgreSQLCreateGroupError() from e - finally: - if connection is not None: - connection.close() - def create_database( self, database: str, @@ -363,50 +321,6 @@ def delete_user(self, user: str) -> None: logger.error(f"Failed to delete user: {e}") raise PostgreSQLDeleteUserError() from e - def grant_internal_access_group_memberships(self) -> None: - """Grant membership to the internal access-group to existing internal users.""" - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - for user in self.system_users: - cursor.execute( - SQL("GRANT {} TO {};").format( - Identifier(ACCESS_GROUP_INTERNAL), - Identifier(user), - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to grant internal access group memberships: {e}") - raise PostgreSQLAssignGroupError() from e - finally: - if connection is not None: - 
connection.close() - - def grant_relation_access_group_memberships(self) -> None: - """Grant membership to the relation access-group to existing relation users.""" - rel_users = self.list_users_from_relation() - if not rel_users: - return - - connection = None - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - rel_groups = SQL(",").join(Identifier(group) for group in [ACCESS_GROUP_RELATION]) - rel_users = SQL(",").join(Identifier(user) for user in rel_users) - - cursor.execute( - SQL("GRANT {groups} TO {users};").format( - groups=rel_groups, - users=rel_users, - ) - ) - except psycopg2.Error as e: - logger.error(f"Failed to grant relation access group memberships: {e}") - raise PostgreSQLAssignGroupError() from e - finally: - if connection is not None: - connection.close() - def enable_disable_extensions( self, extensions: Dict[str, bool], database: Optional[str] = None ) -> None: @@ -569,19 +483,6 @@ def get_postgresql_timezones(self) -> Set[str]: timezones = cursor.fetchall() return {timezone[0] for timezone in timezones} - def get_postgresql_default_table_access_methods(self) -> Set[str]: - """Returns the PostgreSQL available table access methods. - - Returns: - Set of PostgreSQL table access methods. - """ - with self._connect_to_database( - database_host=self.current_host - ) as connection, connection.cursor() as cursor: - cursor.execute("SELECT amname FROM pg_am WHERE amtype = 't';") - access_methods = cursor.fetchall() - return {access_method[0] for access_method in access_methods} - def get_postgresql_version(self, current_host=True) -> str: """Returns the PostgreSQL version. @@ -620,26 +521,6 @@ def is_tls_enabled(self, check_current_host: bool = False) -> bool: # Connection errors happen when PostgreSQL has not started yet. return False - def list_access_groups(self) -> Set[str]: - """Returns the list of PostgreSQL database access groups. - - Returns: - List of PostgreSQL database access groups. 
- """ - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute( - "SELECT groname FROM pg_catalog.pg_group WHERE groname LIKE '%_access';" - ) - access_groups = cursor.fetchall() - return {group[0] for group in access_groups} - except psycopg2.Error as e: - logger.error(f"Failed to list PostgreSQL database access groups: {e}") - raise PostgreSQLListGroupsError() from e - finally: - if connection is not None: - connection.close() - def list_users(self) -> Set[str]: """Returns the list of PostgreSQL database users. @@ -654,29 +535,6 @@ def list_users(self) -> Set[str]: except psycopg2.Error as e: logger.error(f"Failed to list PostgreSQL database users: {e}") raise PostgreSQLListUsersError() from e - finally: - if connection is not None: - connection.close() - - def list_users_from_relation(self) -> Set[str]: - """Returns the list of PostgreSQL database users that were created by a relation. - - Returns: - List of PostgreSQL database users. - """ - try: - with self._connect_to_database() as connection, connection.cursor() as cursor: - cursor.execute( - "SELECT usename FROM pg_catalog.pg_user WHERE usename LIKE 'relation_id_%';" - ) - usernames = cursor.fetchall() - return {username[0] for username in usernames} - except psycopg2.Error as e: - logger.error(f"Failed to list PostgreSQL database users: {e}") - raise PostgreSQLListUsersError() from e - finally: - if connection is not None: - connection.close() def list_valid_privileges_and_roles(self) -> Tuple[Set[str], Set[str]]: """Returns two sets with valid privileges and roles. @@ -795,8 +653,6 @@ def build_postgresql_parameters( for config, value in config_options.items(): # Filter config option not related to PostgreSQL parameters. if not config.startswith(( - "connection", - "cpu", "durability", "instance", "logging", @@ -804,8 +660,6 @@ def build_postgresql_parameters( "optimizer", "request", "response", - "session", - "storage", "vacuum", )): continue