3 changes: 3 additions & 0 deletions pyproject.toml
@@ -7,6 +7,9 @@ branch = true

[tool.coverage.report]
show_missing = true
exclude_lines = [
    "logger\\.debug"
]

[tool.pytest.ini_options]
minversion = "6.0"
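Note on the coverage change above: coverage.py treats each exclude_lines entry as a regular expression matched against individual source lines, so lines containing logger.debug are dropped from the missing-lines report even when the guarded early-exit branches are never exercised by unit tests; overriding exclude_lines also replaces coverage.py's defaults (such as "pragma: no cover") unless they are re-added. A minimal sketch of the effect, using a hypothetical helper that is not part of this charm:

import logging

logger = logging.getLogger(__name__)


def guard(cluster_initialised: bool) -> bool:
    """Hypothetical early-exit guard, only to illustrate the exclusion."""
    if not cluster_initialised:
        # This line matches the "logger\.debug" pattern, so coverage no longer
        # reports it as missing when tests skip this branch; the surrounding
        # lines of the branch still count toward coverage as usual.
        logger.debug("Early exit: cluster not initialized")
        return False
    return True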
15 changes: 15 additions & 0 deletions src/charm.py
@@ -186,6 +186,7 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None:
        """The leader removes the departing units from the list of cluster members."""
        # Don't handle this event in the same unit that is departing.
        if event.departing_unit == self.unit:
            logger.debug("Early exit on_peer_relation_departed: Skipping departing unit")
            return

        # Remove the departing member from the raft cluster.
@@ -194,6 +195,9 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None:
            member_ip = self._patroni.get_member_ip(departing_member)
            self._patroni.remove_raft_member(member_ip)
        except RemoveRaftMemberFailedError:
            logger.debug(
                "Deferring on_peer_relation_departed: Failed to remove member from raft cluster"
            )
            event.defer()
            return

@@ -202,6 +206,7 @@ def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None:
            return

        if "cluster_initialised" not in self._peers.data[self.app]:
            logger.debug("Deferring on_peer_relation_departed: cluster not initialized")
            event.defer()
            return

@@ -233,6 +238,7 @@ def _on_pgdata_storage_detaching(self, _) -> None:
            # Ignore the event if the primary couldn't be retrieved.
            # If a switchover is needed, an automatic failover will be triggered
            # when the unit is removed.
            logger.debug("Early exit on_pgdata_storage_detaching: primary cannot be retrieved")
            return

        if self.unit.name != primary:
@@ -276,6 +282,7 @@ def _on_peer_relation_changed(self, event: RelationChangedEvent):
        """Reconfigure cluster members when something changes."""
        # Prevents the cluster to be reconfigured before it's bootstrapped in the leader.
        if "cluster_initialised" not in self._peers.data[self.app]:
            logger.debug("Deferring on_peer_relation_changed: cluster not initialized")
            event.defer()
            return

@@ -285,6 +292,7 @@ def _on_peer_relation_changed(self, event: RelationChangedEvent):

        # Don't update this member before it's part of the members list.
        if self._unit_ip not in self.members_ips:
            logger.debug("Early exit on_peer_relation_changed: Unit not in the members list")
            return

        # Update the list of the cluster members in the replicas to make them know each other.
@@ -302,6 +310,7 @@ def _on_peer_relation_changed(self, event: RelationChangedEvent):

        # Assert the member is up and running before marking the unit as active.
        if not self._patroni.member_started:
            logger.debug("Deferring on_peer_relation_changed: awaiting for member to start")
            self.unit.status = WaitingStatus("awaiting for member to start")
            event.defer()
            return
@@ -335,6 +344,7 @@ def _add_members(self, event):
        # Compare set of Patroni cluster members and Juju hosts
        # to avoid the unnecessary reconfiguration.
        if self._patroni.cluster_members == self._hosts:
            logger.debug("Early exit add_members: Patroni members equal Juju hosts")
            return

        logger.info("Reconfiguring cluster")
@@ -563,6 +573,7 @@ def _on_leader_elected(self, event: LeaderElectedEvent) -> None:
        # Don't update connection endpoints in the first time this event run for
        # this application because there are no primary and replicas yet.
        if "cluster_initialised" not in self._peers.data[self.app]:
            logger.debug("Early exit on_leader_elected: Cluster not initialized")
            return

        # Only update the connection endpoints if there is a primary.
@@ -597,6 +608,7 @@ def _on_start(self, event) -> None:
        # Doesn't try to bootstrap the cluster if it's in a blocked state
        # caused, for example, because a failed installation of packages.
        if self._has_blocked_status:
            logger.debug("Early exit on_start: Unit blocked")
            return

        postgres_password = self._get_password()
@@ -609,6 +621,7 @@ def _on_start(self, event) -> None:
            return

        if not self.unit.is_leader() and "cluster_initialised" not in self._peers.data[self.app]:
            logger.debug("Deferring on_start: awaiting for cluster to start")
            self.unit.status = WaitingStatus("awaiting for cluster to start")
            event.defer()
            return
@@ -626,6 +639,7 @@ def _on_start(self, event) -> None:

        # Assert the member is up and running before marking it as initialised.
        if not self._patroni.member_started:
            logger.debug("Deferring on_start: awaiting for member to start")
            self.unit.status = WaitingStatus("awaiting for member to start")
            event.defer()
            return
@@ -839,6 +853,7 @@ def update_config(self) -> None:
            # then mark TLS as enabled. This commonly happens when the charm is deployed
            # in a bundle together with the TLS certificates operator.
            self.unit_peer_data.update({"tls": "enabled" if enable_tls else ""})
            logger.debug("Early exit update_config: Patroni not started yet")
            return

        restart_postgresql = enable_tls != self.postgresql.is_tls_enabled()
11 changes: 11 additions & 0 deletions src/relations/db.py
@@ -80,6 +80,9 @@ def _on_relation_changed(self, event: RelationChangedEvent) -> None:
            or not self.charm._patroni.member_started
            or not self.charm.primary_endpoint
        ):
            logger.debug(
                "Deferring on_relation_changed: cluster not initialized, Patroni not started or primary endpoint not available"
            )
            event.defer()
            return

@@ -160,6 +163,7 @@ def _on_relation_departed(self, event: RelationDepartedEvent) -> None:
        if event.departing_unit == self.charm.unit:
            self.charm._peers.data[self.charm.unit].update({"departing": "True"})
            # Just run the rest of the logic for departing of remote units.
            logger.debug("Early exit on_relation_departed: Skipping departing unit")
            return

        # Check for some conditions before trying to access the PostgreSQL instance.
@@ -171,6 +175,9 @@ def _on_relation_departed(self, event: RelationDepartedEvent) -> None:
            or not self.charm._patroni.member_started
            or not self.charm.primary_endpoint
        ):
            logger.debug(
                "Deferring on_relation_departed: cluster not initialized, Patroni not started or primary endpoint not available"
            )
            event.defer()
            return

@@ -194,6 +201,9 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
            or not self.charm._patroni.member_started
            or not self.charm.primary_endpoint
        ):
            logger.debug(
                "Early exit on_relation_broken: Not leader, cluster not initialized, Patroni not started or no primary endpoint"
            )
            return

        # Run this event only if this unit isn't being
@@ -203,6 +213,7 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
        # Neither peer relation data nor stored state
        # are good solutions, just a temporary solution.
        if "departing" in self.charm._peers.data[self.charm.unit]:
            logger.debug("Early exit on_relation_broken: Skipping departing unit")
            return

        # Delete the user.
7 changes: 7 additions & 0 deletions src/relations/postgresql_provider.py
@@ -72,6 +72,9 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
            or not self.charm.primary_endpoint
        ):
            event.defer()
            logger.debug(
                "Deferring on_database_requested: cluster not initialized, Patroni not started or primary endpoint not available"
            )
            return

        # Retrieve the database name and extra user roles using the charm library.
@@ -123,6 +126,9 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
            or not self.charm._patroni.member_started
            or not self.charm.primary_endpoint
        ):
            logger.debug(
                "Early exit on_relation_broken: Not leader, cluster not initialized, Patroni not started or no primary endpoint"
            )
            return

        # Run this event only if this unit isn't being
@@ -132,6 +138,7 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
        # Neither peer relation data nor stored state
        # are good solutions, just a temporary solution.
        if "departing" in self.charm._peers.data[self.charm.unit]:
            logger.debug("Early exit on_relation_broken: Skipping departing unit")
            return

        # Delete the user.
28 changes: 28 additions & 0 deletions tests/unit/test_charm.py
@@ -5,6 +5,7 @@
import unittest
from unittest.mock import MagicMock, Mock, PropertyMock, call, mock_open, patch

import ops.testing
from charms.operator_libs_linux.v0 import apt
from charms.postgresql_k8s.v0.postgresql import (
    PostgreSQLCreateUserError,
@@ -27,6 +28,9 @@ def setUp(self):
        self._postgresql_container = "postgresql"
        self._postgresql_service = "postgresql"

        ops.testing.SIMULATE_CAN_CONNECT = True
        self.addCleanup(setattr, ops.testing, "SIMULATE_CAN_CONNECT", False)

        self.harness = Harness(PostgresqlOperatorCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()
@@ -213,6 +217,30 @@ def test_on_start(
        _oversee_users.assert_called_once()
        self.assertTrue(isinstance(self.harness.model.unit.status, ActiveStatus))

    @patch_network_get(private_address="1.1.1.1")
    @patch("charm.PostgresqlOperatorCharm.postgresql")
    @patch("charm.Patroni")
    @patch("charm.PostgresqlOperatorCharm._get_password")
    def test_on_start_no_patroni_member(
        self,
        _get_password,
        patroni,
        _postgresql,
    ):
        # Mock the password and the Patroni member state.
        patroni.return_value.member_started = False
        _get_password.return_value = "fake-operator-password"
        bootstrap_cluster = patroni.return_value.bootstrap_cluster
        bootstrap_cluster.return_value = True

        self.harness.set_leader()
        self.charm.on.start.emit()
        bootstrap_cluster.assert_called_once()
        _postgresql.create_user.assert_not_called()
        self.assertTrue(isinstance(self.harness.model.unit.status, WaitingStatus))
        self.assertEqual(self.harness.model.unit.status.message, "awaiting for member to start")

    @patch("charm.Patroni.bootstrap_cluster")
    @patch("charm.PostgresqlOperatorCharm._replication_password")
    @patch("charm.PostgresqlOperatorCharm._get_password")
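A possible follow-up, not part of this diff: the new debug messages can themselves be asserted with unittest's assertLogs. A sketch of such a test for TestCharm in tests/unit/test_charm.py, mirroring the mocks from test_on_start_no_patroni_member above; the logger name "charm" is assumed from logging.getLogger(__name__) in src/charm.py, and the test name is hypothetical:

    @patch_network_get(private_address="1.1.1.1")
    @patch("charm.PostgresqlOperatorCharm.postgresql")
    @patch("charm.Patroni")
    @patch("charm.PostgresqlOperatorCharm._get_password")
    def test_on_start_logs_deferral(self, _get_password, patroni, _postgresql):
        # Hypothetical test: the member has not started, so _on_start should
        # defer and emit the new debug message.
        patroni.return_value.member_started = False
        _get_password.return_value = "fake-operator-password"
        self.harness.set_leader()
        with self.assertLogs("charm", level="DEBUG") as logs:
            self.charm.on.start.emit()
        self.assertIn(
            "Deferring on_start: awaiting for member to start", "\n".join(logs.output)
        )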
3 changes: 2 additions & 1 deletion tests/unit/test_cluster.py
@@ -188,9 +188,10 @@ def test_render_patroni_service_file(self, _, _render_file):
            0o644,
        )

    @patch("charm.Patroni._get_postgresql_version")
    @patch("charm.Patroni.render_file")
    @patch("charm.Patroni._create_directory")
    def test_render_patroni_yml_file(self, _, _render_file):
    def test_render_patroni_yml_file(self, _, _render_file, __):
        # Define variables to render in the template.
        member_name = "postgresql-0"
        scope = "postgresql"
4 changes: 4 additions & 0 deletions tests/unit/test_db.py
@@ -4,6 +4,7 @@
import unittest
from unittest.mock import MagicMock, Mock, PropertyMock, patch

import ops.testing
from charms.postgresql_k8s.v0.postgresql import (
    PostgreSQLCreateDatabaseError,
    PostgreSQLCreateUserError,
@@ -26,6 +27,9 @@
@patch_network_get(private_address="1.1.1.1")
class TestDbProvides(unittest.TestCase):
    def setUp(self):
        ops.testing.SIMULATE_CAN_CONNECT = True
        self.addCleanup(setattr, ops.testing, "SIMULATE_CAN_CONNECT", False)

        self.harness = Harness(PostgresqlOperatorCharm)
        self.addCleanup(self.harness.cleanup)

4 changes: 4 additions & 0 deletions tests/unit/test_postgresql_provider.py
@@ -4,6 +4,7 @@
import unittest
from unittest.mock import Mock, PropertyMock, patch

import ops.testing
from charms.postgresql_k8s.v0.postgresql import (
    PostgreSQLCreateDatabaseError,
    PostgreSQLCreateUserError,
@@ -29,6 +30,9 @@
@patch_network_get(private_address="1.1.1.1")
class TestPostgreSQLProvider(unittest.TestCase):
    def setUp(self):
        ops.testing.SIMULATE_CAN_CONNECT = True
        self.addCleanup(setattr, ops.testing, "SIMULATE_CAN_CONNECT", False)

        self.harness = Harness(PostgresqlOperatorCharm)
        self.addCleanup(self.harness.cleanup)

19 changes: 10 additions & 9 deletions tox.ini
@@ -10,7 +10,7 @@ envlist = lint, unit
src_path = {toxinidir}/src/
tst_path = {toxinidir}/tests/
;lib_path = {toxinidir}/lib/charms/operator_name_with_underscores
all_path = {[vars]src_path} {[vars]tst_path}
all_path = {[vars]src_path} {[vars]tst_path}

[testenv]
setenv =
@@ -57,6 +57,7 @@ deps =
    jsonschema
    psycopg2-binary
    pytest
    pytest-asyncio
    coverage[toml]
    jinja2
    -r{toxinidir}/requirements.txt
@@ -82,7 +83,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m charm_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:database-relation-integration]
@@ -101,7 +102,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m database_relation_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:db-relation-integration]
@@ -120,7 +121,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m db_relation_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:db-admin-relation-integration]
@@ -139,7 +140,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m db_admin_relation_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:ha-self-healing-integration]
@@ -158,7 +159,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m ha_self_healing_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:password-rotation-integration]
@@ -177,7 +178,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m password_rotation_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:tls-integration]
@@ -196,7 +197,7 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} -m tls_tests
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh

[testenv:integration]
@@ -215,5 +216,5 @@ commands =
    pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs}
    # Remove the downloaded resource.
    sh -c 'rm -f patroni.tar.gz'
whitelist_externals =
allowlist_externals =
    sh