43 changes: 42 additions & 1 deletion src/charm.py
@@ -9,7 +9,7 @@

from charms.postgresql_k8s.v0.postgresql import PostgreSQL
from lightkube import ApiError, Client, codecs
from lightkube.resources.core_v1 import Pod
from lightkube.resources.core_v1 import Endpoints, Pod, Service
from ops.charm import (
    ActionEvent,
    CharmBase,
@@ -57,6 +57,7 @@ def __init__(self, *args):
        self.framework.observe(self.on[PEER].relation_changed, self._on_peer_relation_changed)
        self.framework.observe(self.on[PEER].relation_departed, self._on_peer_relation_departed)
        self.framework.observe(self.on.postgresql_pebble_ready, self._on_postgresql_pebble_ready)
        self.framework.observe(self.on.stop, self._on_stop)
        self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm)
        self.framework.observe(
            self.on.get_operator_password_action, self._on_get_operator_password
@@ -437,6 +438,46 @@ def _on_get_primary(self, event: ActionEvent) -> None:
        except RetryError as e:
            logger.error(f"failed to get primary with error {e}")

    def _on_stop(self, _) -> None:
        """Remove k8s resources created by the charm and Patroni."""
        client = Client()

        # Get the k8s resources created by the charm.
        with open("src/resources.yaml") as f:
            resources = codecs.load_all_yaml(f, context=self._context)
        # Ignore the service resources, which will be retrieved in the next step.
        resources_to_delete = list(
            filter(
                lambda x: not isinstance(x, Service),
                resources,
            )
        )

        # Get the k8s resources created by the charm and Patroni.
        for kind in [Endpoints, Service]:
            resources_to_delete.extend(
                client.list(
                    kind,
                    namespace=self._namespace,
                    labels={"app.juju.is/created-by": self._name},
                )
            )

        # Delete the resources.
        for resource in resources_to_delete:
            try:
                client.delete(
                    type(resource),
                    name=resource.metadata.name,
                    namespace=resource.metadata.namespace,
                )
            except ApiError as e:
                # 404 means the resource was already deleted by another unit.
                if e.status.code != 404:
                    # Only log a message, as the charm is being stopped.
                    logger.error(f"failed to delete resource: {resource}.")

    def _on_update_status(self, _) -> None:
        # Display an active status message if the current unit is the primary.
        try:
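
The _on_stop handler above combines two sources: the templated resources from
src/resources.yaml and the Endpoints/Services that Patroni creates at runtime,
which are only discoverable through the app.juju.is/created-by label. A minimal
standalone sketch of that label-based cleanup, assuming a reachable cluster, a
model namespace "test-model" and an application named "postgresql-k8s":

from lightkube import ApiError, Client
from lightkube.resources.core_v1 import Endpoints, Service

client = Client()
for kind in (Endpoints, Service):
    # Find everything labelled as created by the given Juju application.
    for resource in client.list(
        kind, namespace="test-model", labels={"app.juju.is/created-by": "postgresql-k8s"}
    ):
        try:
            client.delete(kind, name=resource.metadata.name, namespace="test-model")
        except ApiError as e:
            if e.status.code != 404:  # 404: already removed by another unit
                print(f"failed to delete {kind.__name__}/{resource.metadata.name}")
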
29 changes: 0 additions & 29 deletions src/resources.yaml
@@ -84,32 +84,3 @@ roleRef:
subjects:
- kind: ServiceAccount
  name: {{ app_name }}

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: patroni-k8s-ep-access
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  resourceNames:
  - kubernetes
  verbs:
  - get

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: patroni-k8s-ep-access
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: patroni-k8s-ep-access
subjects:
- kind: ServiceAccount
  name: {{ app_name }}
  namespace: {{ namespace }}
126 changes: 126 additions & 0 deletions tests/integration/helpers.py
@@ -8,6 +8,11 @@
import psycopg2
import requests
import yaml
from lightkube import codecs
from lightkube.core.client import Client
from lightkube.core.exceptions import ApiError
from lightkube.generic_resource import GenericNamespacedResource
from lightkube.resources.core_v1 import Endpoints, Service
from pytest_operator.plugin import OpsTest

METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
@@ -196,6 +201,110 @@ def get_application_units(ops_test: OpsTest, application_name: str) -> List[str]
    ]


def get_charm_resources(namespace: str, application: str) -> List[GenericNamespacedResource]:
    """Return the list of k8s resources from the resources.yaml file.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        list of k8s resources defined in resources.yaml.
    """
    # Define the context needed to load the k8s resources list.
    context = {"namespace": namespace, "app_name": application}

    # Load the list of the resources from resources.yaml.
    with open("src/resources.yaml") as f:
        return codecs.load_all_yaml(f, context=context)
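
# Note: load_all_yaml renders resources.yaml as a Jinja template, so the context
# above fills the {{ app_name }} and {{ namespace }} placeholders. A hypothetical
# inline example (requires jinja2):
#   codecs.load_all_yaml(
#       "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ app_name }}",
#       context={"app_name": "postgresql-k8s"},
#   )
# returns a single ServiceAccount resource named "postgresql-k8s".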


def get_existing_k8s_resources(namespace: str, application: str) -> set:
    """Return the set of k8s resources that were created by the charm and Patroni.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        set of existing charm/Patroni-specific k8s resources.
    """
    # Create a k8s API client instance.
    client = Client(namespace=namespace)

    # Retrieve the k8s resources the charm should create.
    charm_resources = get_charm_resources(namespace, application)

    # Add only the resources that currently exist.
    resources = set(
        map(
            # Build an identifier for each resource (using its type and name).
            lambda x: f"{type(x).__name__}/{x.metadata.name}",
            filter(
                lambda x: resource_exists(client, x),
                charm_resources,
            )
        )
    )

    # Include the resources created by the charm and Patroni.
    for kind in [Endpoints, Service]:
        extra_resources = client.list(
            kind,
            namespace=namespace,
            labels={"app.juju.is/created-by": application},
        )
        resources.update(
            set(
                map(
                    # Build an identifier for each resource (using its type and name).
                    lambda x: f"{kind.__name__}/{x.metadata.name}",
                    extra_resources,
                )
            )
        )

    return resources


def get_expected_k8s_resources(namespace: str, application: str) -> set:
    """Return the set of expected k8s resources when the charm is deployed.

    Args:
        namespace: namespace related to the model where
            the charm was deployed.
        application: application name.

    Returns:
        set of expected charm/Patroni-specific k8s resources.
    """
    # Retrieve the k8s resources created by the charm.
    charm_resources = get_charm_resources(namespace, application)

    # Build an identifier for each resource (using its type and name).
    resources = set(
        map(
            lambda x: f"{type(x).__name__}/{x.metadata.name}",
            charm_resources,
        )
    )

    # Include the resources created by the charm and Patroni.
    resources.update(
        [
            f"Endpoints/patroni-{application}-config",
            f"Endpoints/patroni-{application}",
            f"Endpoints/{application}-primary",
            f"Endpoints/{application}-replicas",
            f"Service/patroni-{application}-config",
        ]
    )

    return resources
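
# For illustration, with an application named "postgresql-k8s" the expected set
# contains identifiers such as "Endpoints/patroni-postgresql-k8s",
# "Endpoints/postgresql-k8s-primary" and "Service/patroni-postgresql-k8s-config",
# plus one entry per resource rendered from resources.yaml.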


async def get_operator_password(ops_test: OpsTest):
    """Retrieve the operator user password using the action."""
    unit = ops_test.model.units.get(f"{DATABASE_APP_NAME}/0")
@@ -235,6 +344,23 @@ async def get_unit_address(ops_test: OpsTest, unit_name: str) -> str:
    return status["applications"][unit_name.split("/")[0]].units[unit_name]["address"]


def resource_exists(client: Client, resource: GenericNamespacedResource) -> bool:
    """Check whether a specific resource exists.

    Args:
        client: k8s API client instance.
        resource: k8s resource.

    Returns:
        whether the resource exists.
    """
    try:
        client.get(type(resource), name=resource.metadata.name)
        return True
    except ApiError:
        return False


async def scale_application(ops_test: OpsTest, application_name: str, scale: int) -> None:
"""Scale a given application to a specific unit count.

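
Since resource_exists only needs a typed resource object carrying a name, it can
be exercised directly against any lightkube resource class. A small usage sketch,
with an assumed model namespace and service name:

from lightkube.core.client import Client
from lightkube.models.meta_v1 import ObjectMeta
from lightkube.resources.core_v1 import Service

client = Client(namespace="test-model")
service = Service(metadata=ObjectMeta(name="patroni-postgresql-k8s-config"))
print(resource_exists(client, service))  # True only if the Service is in the cluster
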
38 changes: 38 additions & 0 deletions tests/integration/test_charm.py
@@ -18,6 +18,8 @@
    convert_records_to_dict,
    get_application_units,
    get_cluster_members,
    get_existing_k8s_resources,
    get_expected_k8s_resources,
    get_operator_password,
    get_unit_address,
    scale_application,
@@ -52,6 +54,16 @@ async def test_build_and_deploy(ops_test: OpsTest):
        assert ops_test.model.applications[APP_NAME].units[unit_id].workload_status == "active"


@pytest.mark.charm
async def test_application_created_required_resources(ops_test: OpsTest) -> None:
    # Compare the k8s resources that the charm and Patroni should create with
    # the k8s resources that currently exist in the cluster.
    namespace = ops_test.model.info.name
    existing_resources = get_existing_k8s_resources(namespace, APP_NAME)
    expected_resources = get_expected_k8s_resources(namespace, APP_NAME)
    assert existing_resources == expected_resources
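
# The charm marker above makes it possible to run this resource check on its own,
# e.g. (assumed invocation, depending on the project's pytest configuration):
#   pytest -m charm tests/integration/test_charm.py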


@pytest.mark.parametrize("unit_id", UNIT_IDS)
async def test_labels_consistency_across_pods(ops_test: OpsTest, unit_id: int) -> None:
    model = ops_test.model.info
@@ -259,11 +271,37 @@ async def test_application_removal(ops_test: OpsTest) -> None:
        )
    )

    # Check that all k8s resources created by the charm and Patroni were removed.
    namespace = ops_test.model.info.name
    existing_resources = get_existing_k8s_resources(namespace, APP_NAME)
    assert existing_resources == set()

    # Check that the application is gone
    # (i.e., its units didn't end up in an error state).
    assert APP_NAME not in ops_test.model.applications


@pytest.mark.charm
async def test_redeploy_charm_same_model(ops_test: OpsTest):
    """Redeploy the charm in the same model to verify that a fresh deployment succeeds."""
    charm = await ops_test.build_charm(".")
    async with ops_test.fast_forward():
        await ops_test.model.deploy(
            charm,
            resources={
                "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"]
            },
            application_name=APP_NAME,
            num_units=3,
            trust=True,
        )

        # Waiting for active status is enough to confirm the charm/workload works for this test.
        await ops_test.model.wait_for_idle(
            apps=[APP_NAME], status="active", timeout=1000, wait_for_exact_units=3
        )


@retry(
    retry=retry_if_result(lambda x: not x),
    stop=stop_after_attempt(10),
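
The truncated decorator above uses tenacity's retry-until-truthy pattern:
retry_if_result re-runs the wrapped callable while it returns a falsy value. A
self-contained sketch of the same idea (the wait strategy is assumed, since the
diff is cut off here):

from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed

@retry(retry=retry_if_result(lambda x: not x), stop=stop_after_attempt(10), wait=wait_fixed(5))
def predicate() -> bool:
    """Re-run until truthy; tenacity raises RetryError after 10 failed attempts."""
    ...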