Skip to content

Commit d063f7d

Browse files
committed
Merge branch 'main' into password-rotation
2 parents db8dbd8 + 90d4778 commit d063f7d

File tree

4 files changed

+206
-30
lines changed

4 files changed

+206
-30
lines changed

src/charm.py

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
PostgreSQLUpdateUserPasswordError,
1313
)
1414
from lightkube import ApiError, Client, codecs
15-
from lightkube.resources.core_v1 import Pod
15+
from lightkube.resources.core_v1 import Endpoints, Pod, Service
1616
from ops.charm import (
1717
ActionEvent,
1818
CharmBase,
@@ -67,6 +67,7 @@ def __init__(self, *args):
6767
self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed)
6868
self.framework.observe(self.on[PEER].relation_departed, self._on_peer_relation_departed)
6969
self.framework.observe(self.on.postgresql_pebble_ready, self._on_postgresql_pebble_ready)
70+
self.framework.observe(self.on.stop, self._on_stop)
7071
self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm)
7172
self.framework.observe(self.on.get_password_action, self._on_get_password)
7273
self.framework.observe(self.on.set_password_action, self._on_set_password)
@@ -511,6 +512,46 @@ def _on_get_primary(self, event: ActionEvent) -> None:
511512
except RetryError as e:
512513
logger.error(f"failed to get primary with error {e}")
513514

515+
def _on_stop(self, _) -> None:
    """Remove k8s resources created by the charm and Patroni.

    Deletes the resources templated in src/resources.yaml (except Service
    resources, which are instead retrieved by label together with the
    Endpoints that Patroni creates at runtime). Errors are only logged,
    never raised, because the charm is being stopped.
    """
    client = Client()

    # Get the k8s resources created by the charm.
    with open("src/resources.yaml") as f:
        resources = codecs.load_all_yaml(f, context=self._context)
    # Ignore the service resources, which will be retrieved in the next step.
    resources_to_delete = [
        resource for resource in resources if not isinstance(resource, Service)
    ]

    # Get the k8s resources created by the charm and Patroni
    # (identified by the label Juju applies to resources of this app).
    for kind in [Endpoints, Service]:
        resources_to_delete.extend(
            client.list(
                kind,
                namespace=self._namespace,
                labels={"app.juju.is/created-by": self._name},
            )
        )

    # Delete the resources.
    for resource in resources_to_delete:
        try:
            client.delete(
                type(resource),
                name=resource.metadata.name,
                namespace=resource.metadata.namespace,
            )
        except ApiError as e:
            # 404 means that the resource was already deleted by other unit.
            if e.status.code != 404:
                # Only log a message (including the API error for
                # debuggability), as the charm is being stopped.
                logger.error(f"failed to delete resource {resource} with error {e}.")
554+
514555
def _on_update_status(self, _) -> None:
515556
# Display an active status message if the current unit is the primary.
516557
try:

src/resources.yaml

Lines changed: 0 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -84,32 +84,3 @@ roleRef:
8484
subjects:
8585
- kind: ServiceAccount
8686
name: {{ app_name }}
87-
88-
---
89-
apiVersion: rbac.authorization.k8s.io/v1
90-
kind: ClusterRole
91-
metadata:
92-
name: patroni-k8s-ep-access
93-
rules:
94-
- apiGroups:
95-
- ""
96-
resources:
97-
- endpoints
98-
resourceNames:
99-
- kubernetes
100-
verbs:
101-
- get
102-
103-
---
104-
apiVersion: rbac.authorization.k8s.io/v1
105-
kind: ClusterRoleBinding
106-
metadata:
107-
name: patroni-k8s-ep-access
108-
roleRef:
109-
apiGroup: rbac.authorization.k8s.io
110-
kind: ClusterRole
111-
name: patroni-k8s-ep-access
112-
subjects:
113-
- kind: ServiceAccount
114-
name: {{ app_name }}
115-
namespace: {{ namespace }}

tests/integration/helpers.py

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,11 @@
99
import psycopg2
1010
import requests
1111
import yaml
12+
from lightkube import codecs
13+
from lightkube.core.client import Client
14+
from lightkube.core.exceptions import ApiError
15+
from lightkube.generic_resource import GenericNamespacedResource
16+
from lightkube.resources.core_v1 import Endpoints, Service
1217
from pytest_operator.plugin import OpsTest
1318
from tenacity import retry, retry_if_result, stop_after_attempt, wait_exponential
1419

@@ -222,6 +227,110 @@ def get_application_units(ops_test: OpsTest, application_name: str) -> List[str]
222227
]
223228

224229

230+
def get_charm_resources(namespace: str, application: str) -> List[GenericNamespacedResource]:
    """Return the list of k8s resources from resources.yaml file.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        list of existing charm/Patroni specific k8s resources.
    """
    # Context used to render the resource templates in resources.yaml.
    template_context = {"namespace": namespace, "app_name": application}

    # Render and parse every resource declared in the file.
    with open("src/resources.yaml") as resources_file:
        return codecs.load_all_yaml(resources_file, context=template_context)
247+
248+
249+
def get_existing_k8s_resources(namespace: str, application: str) -> set:
    """Return the list of k8s resources that were created by the charm and Patroni.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        set of identifiers ("Kind/name") for the existing
        charm/Patroni specific k8s resources.
    """
    # Create a k8s API client instance.
    client = Client(namespace=namespace)

    # Retrieve the k8s resources the charm should create.
    charm_resources = get_charm_resources(namespace, application)

    # Add only the resources that currently exist, identified by type and name.
    resources = {
        f"{type(resource).__name__}/{resource.metadata.name}"
        for resource in charm_resources
        if resource_exists(client, resource)
    }

    # Include the resources created by the charm and Patroni
    # (identified by the label Juju applies to resources of this app).
    for kind in [Endpoints, Service]:
        resources.update(
            f"{kind.__name__}/{resource.metadata.name}"
            for resource in client.list(
                kind,
                namespace=namespace,
                labels={"app.juju.is/created-by": application},
            )
        )

    return resources
296+
297+
298+
def get_expected_k8s_resources(namespace: str, application: str) -> set:
    """Return the list of expected k8s resources when the charm is deployed.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        set of identifiers ("Kind/name") for the expected
        charm/Patroni specific k8s resources.
    """
    # Retrieve the k8s resources created by the charm.
    charm_resources = get_charm_resources(namespace, application)

    # Build an identifier for each resource (using its type and name).
    resources = set()
    for resource in charm_resources:
        resources.add(f"{type(resource).__name__}/{resource.metadata.name}")

    # Include the resources created by the charm and Patroni.
    resources.update(
        [
            f"Endpoints/patroni-{application}-config",
            f"Endpoints/patroni-{application}",
            f"Endpoints/{application}-primary",
            f"Endpoints/{application}-replicas",
            f"Service/patroni-{application}-config",
        ]
    )

    return resources
332+
333+
225334
async def get_password(ops_test: OpsTest, username: str = "operator"):
226335
"""Retrieve a user password using the action."""
227336
unit = ops_test.model.units.get(f"{DATABASE_APP_NAME}/0")
@@ -272,6 +381,23 @@ async def restart_patroni(ops_test: OpsTest, unit_name: str) -> None:
272381
requests.post(f"http://{unit_ip}:8008/restart")
273382

274383

384+
def resource_exists(client: Client, resource: GenericNamespacedResource) -> bool:
    """Check whether a specific resource exists.

    Args:
        client: k8s API client instance.
        resource: k8s resource.

    Returns:
        whether the resource exists.
    """
    # EAFP: a failed lookup raises ApiError, which means the resource is absent.
    try:
        client.get(type(resource), name=resource.metadata.name)
    except ApiError:
        return False
    return True
399+
400+
275401
async def scale_application(ops_test: OpsTest, application_name: str, scale: int) -> None:
276402
"""Scale a given application to a specific unit count.
277403

tests/integration/test_charm.py

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
convert_records_to_dict,
1919
get_application_units,
2020
get_cluster_members,
21+
get_existing_k8s_resources,
22+
get_expected_k8s_resources,
2123
get_password,
2224
get_unit_address,
2325
scale_application,
@@ -52,6 +54,16 @@ async def test_build_and_deploy(ops_test: OpsTest):
5254
assert ops_test.model.applications[APP_NAME].units[unit_id].workload_status == "active"
5355

5456

57+
@pytest.mark.charm
async def test_application_created_required_resources(ops_test: OpsTest) -> None:
    """Compare the k8s resources the charm and Patroni should create with the created ones."""
    namespace = ops_test.model.info.name
    existing_resources = get_existing_k8s_resources(namespace, APP_NAME)
    expected_resources = get_expected_k8s_resources(namespace, APP_NAME)
    # Both helpers already return sets, so compare them directly
    # (no need to wrap in set() again).
    assert existing_resources == expected_resources
65+
66+
5567
@pytest.mark.parametrize("unit_id", UNIT_IDS)
5668
async def test_labels_consistency_across_pods(ops_test: OpsTest, unit_id: int) -> None:
5769
model = ops_test.model.info
@@ -259,11 +271,37 @@ async def test_application_removal(ops_test: OpsTest) -> None:
259271
)
260272
)
261273

274+
# Check that all k8s resources created by the charm and Patroni were removed.
275+
namespace = ops_test.model.info.name
276+
existing_resources = get_existing_k8s_resources(namespace, APP_NAME)
277+
assert set(existing_resources) == set()
278+
262279
# Check whether the application is gone
263280
# (in that situation, the units aren't in an error state).
264281
assert APP_NAME not in ops_test.model.applications
265282

266283

284+
@pytest.mark.charm
async def test_redeploy_charm_same_model(ops_test: OpsTest):
    """Redeploy the charm in the same model to test that it works."""
    charm = await ops_test.build_charm(".")
    # NOTE(review): assumes wait_for_idle runs inside the fast_forward
    # context, as the diff ordering suggests — confirm against original file.
    oci_resources = {
        "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"]
    }
    async with ops_test.fast_forward():
        await ops_test.model.deploy(
            charm,
            resources=oci_resources,
            application_name=APP_NAME,
            num_units=3,
            trust=True,
        )

        # This check is enough to ensure that the charm/workload is working for this specific test.
        await ops_test.model.wait_for_idle(
            apps=[APP_NAME], status="active", timeout=1000, wait_for_exact_units=3
        )
303+
304+
267305
@retry(
268306
retry=retry_if_result(lambda x: not x),
269307
stop=stop_after_attempt(10),

0 commit comments

Comments
 (0)