diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index b5d7b5f0ed..15e8210f0a 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -30,20 +30,3 @@ def juju_has_secrets(mocker: MockerFixture): JujuVersion, "has_secrets", new_callable=PropertyMock ).return_value = True return True - - -@pytest.fixture -def only_with_juju_secrets(juju_has_secrets): - """Pretty way to skip Juju 3 tests.""" - if not juju_has_secrets: - pytest.skip("Secrets test only applies on Juju 3.x") - - -@pytest.fixture -def only_without_juju_secrets(juju_has_secrets): - """Pretty way to skip Juju 2-specific tests. - - Typically: to save CI time, when the same check were executed in a Juju 3-specific way already - """ - if juju_has_secrets: - pytest.skip("Skipping legacy secrets tests") diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py index 527204ad9b..356505a7e1 100644 --- a/tests/unit/test_backups.py +++ b/tests/unit/test_backups.py @@ -1,10 +1,11 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. import datetime -import unittest from typing import OrderedDict +from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, call, mock_open, patch +import pytest from boto3.exceptions import S3UploadFailedError from botocore.exceptions import ClientError from jinja2 import Template @@ -24,187 +25,188 @@ FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE = "failed to initialize stanza, check your S3 settings" S3_PARAMETERS_RELATION = "s3-parameters" +# used for assert functions +tc = TestCase() -class TestPostgreSQLBackups(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): + +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): # Mock generic sync client to avoid search to ~/.kube/config. - self.patcher = patch("lightkube.core.client.GenericSyncClient") - self.mock_k8s_client = self.patcher.start() + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) + harness = Harness(PostgresqlOperatorCharm) # Set up the initial relation and hooks. - self.peer_rel_id = self.harness.add_relation(PEER, "postgresql-k8s") - self.harness.add_relation_unit(self.peer_rel_id, "postgresql-k8s/0") - self.harness.begin() - self.charm = self.harness.charm + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() - def relate_to_s3_integrator(self): - self.s3_rel_id = self.harness.add_relation(S3_PARAMETERS_RELATION, "s3-integrator") - def remove_relation_from_s3_integrator(self): - self.harness.remove_relation(S3_PARAMETERS_RELATION, "s3-integrator") - self.s3_rel_id = None +def test_stanza_name(harness): + tc.assertEqual( + harness.charm.backup.stanza_name, + f"{harness.charm.model.name}.{harness.charm.cluster_name}", + ) - def test_stanza_name(self): - self.assertEqual( - self.charm.backup.stanza_name, f"{self.charm.model.name}.{self.charm.cluster_name}" - ) - def test_are_backup_settings_ok(self): - # Test without S3 relation. - self.assertEqual( - self.charm.backup._are_backup_settings_ok(), - (False, "Relation with s3-integrator charm missing, cannot create/restore backup."), +def test_are_backup_settings_ok(harness): + # Test without S3 relation. 
+ tc.assertEqual( + harness.charm.backup._are_backup_settings_ok(), + (False, "Relation with s3-integrator charm missing, cannot create/restore backup."), + ) + + # Test when there are missing S3 parameters. + harness.add_relation(S3_PARAMETERS_RELATION, "s3-integrator") + tc.assertEqual( + harness.charm.backup._are_backup_settings_ok(), + (False, "Missing S3 parameters: ['bucket', 'access-key', 'secret-key']"), + ) + + # Test when all required parameters are provided. + with patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters: + _retrieve_s3_parameters.return_value = ["bucket", "access-key", "secret-key"], [] + tc.assertEqual( + harness.charm.backup._are_backup_settings_ok(), + (True, None), ) - # Test when there are missing S3 parameters. - self.relate_to_s3_integrator() - self.assertEqual( - self.charm.backup._are_backup_settings_ok(), - (False, "Missing S3 parameters: ['bucket', 'access-key', 'secret-key']"), - ) - - # Test when all required parameters are provided. - with patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters: - _retrieve_s3_parameters.return_value = ["bucket", "access-key", "secret-key"], [] - self.assertEqual( - self.charm.backup._are_backup_settings_ok(), - (True, None), - ) - @patch("charm.PostgreSQLBackups._are_backup_settings_ok") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("ops.model.Application.planned_units") - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - def test_can_unit_perform_backup( - self, _is_primary, _planned_units, _member_started, _are_backup_settings_ok +def test_can_unit_perform_backup(harness): + with ( + patch("charm.PostgreSQLBackups._are_backup_settings_ok") as _are_backup_settings_ok, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("ops.model.Application.planned_units") as _planned_units, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, ): # Test when the charm fails to retrieve the primary. + peer_rel_id = harness.model.get_relation(PEER).id _is_primary.side_effect = RetryError(last_attempt=1) - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Unit cannot perform backups as the database seems to be offline"), ) # Test when the unit is in a blocked state. _is_primary.side_effect = None - self.charm.unit.status = BlockedStatus("fake blocked state") - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + harness.charm.unit.status = BlockedStatus("fake blocked state") + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Unit is in a blocking state"), ) # Test when running the check in the primary, there are replicas and TLS is enabled. 
- self.charm.unit.status = ActiveStatus() + harness.charm.unit.status = ActiveStatus() _is_primary.return_value = True _planned_units.return_value = 2 - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"tls": "True"}, ) - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Unit cannot perform backups as it is the cluster primary"), ) # Test when running the check in a replica and TLS is disabled. _is_primary.return_value = False - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"tls": ""}, ) - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Unit cannot perform backups as TLS is not enabled"), ) # Test when Patroni or PostgreSQL hasn't started yet. _is_primary.return_value = True _member_started.return_value = False - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Unit cannot perform backups as it's not in running state"), ) # Test when the stanza was not initialised yet. _member_started.return_value = True - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "Stanza was not initialised"), ) # Test when S3 parameters are not ok. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) _are_backup_settings_ok.return_value = (False, "fake error message") - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (False, "fake error message"), ) # Test when everything is ok to run a backup. 
_are_backup_settings_ok.return_value = (True, None) - self.assertEqual( - self.charm.backup._can_unit_perform_backup(), + tc.assertEqual( + harness.charm.backup._can_unit_perform_backup(), (True, None), ) - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch( - "charm.Patroni.rock_postgresql_version", new_callable=PropertyMock(return_value="14.10") - ) - @patch("charm.PostgreSQLBackups._execute_command") - def test_can_use_s3_repository( - self, - _execute_command, - _rock_postgresql_version, - _update_config, - _member_started, - _reload_patroni_configuration, + +def test_can_use_s3_repository(harness): + with ( + patch("charm.Patroni.reload_patroni_configuration") as _reload_patroni_configuration, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch( + "charm.Patroni.rock_postgresql_version", + new_callable=PropertyMock(return_value="14.10"), + ) as _rock_postgresql_version, + patch("charm.PostgreSQLBackups._execute_command") as _execute_command, ): + peer_rel_id = harness.model.get_relation(PEER).id # Define the stanza name inside the unit relation data. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) # Test when nothing is returned from the pgBackRest info command. _execute_command.return_value = (None, None) - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (False, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE), ) # Test when the unit is a replica. pgbackrest_info_same_cluster_backup_output = ( - f'[{{"db": [{{"system-id": "12345"}}], "name": "{self.charm.backup.stanza_name}"}}]', + f'[{{"db": [{{"system-id": "12345"}}], "name": "{harness.charm.backup.stanza_name}"}}]', None, ) _execute_command.return_value = pgbackrest_info_same_cluster_backup_output - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (True, None), ) # Assert that the stanza name is still in the unit relation data. - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": self.charm.backup.stanza_name}, + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": harness.charm.backup.stanza_name}, ) # Test when the unit is the leader and the workload is running, @@ -214,11 +216,11 @@ def test_can_use_s3_repository( pgbackrest_info_same_cluster_backup_output, ("", "fake error"), ] - with self.harness.hooks_disabled(): - self.harness.set_leader() - with self.assertRaises(Exception): - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + with harness.hooks_disabled(): + harness.set_leader() + with tc.assertRaises(Exception): + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE), ) _update_config.assert_not_called() @@ -227,7 +229,7 @@ def test_can_use_s3_repository( # Test when the cluster system id can be retrieved, but it's different from the stanza system id. 
pgbackrest_info_other_cluster_system_id_backup_output = ( - f'[{{"db": [{{"system-id": "12345"}}], "name": "{self.charm.backup.stanza_name}"}}]', + f'[{{"db": [{{"system-id": "12345"}}], "name": "{harness.charm.backup.stanza_name}"}}]', None, ) other_instance_system_identifier_output = ( @@ -238,14 +240,14 @@ def test_can_use_s3_repository( pgbackrest_info_other_cluster_system_id_backup_output, other_instance_system_identifier_output, ] - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE), ) _update_config.assert_called_once() @@ -253,14 +255,14 @@ def test_can_use_s3_repository( _reload_patroni_configuration.assert_called_once() # Assert that the stanza name is not present in the unit relation data anymore. - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) # Test when the cluster system id can be retrieved, but it's different from the stanza system id. _update_config.reset_mock() _member_started.reset_mock() _reload_patroni_configuration.reset_mock() pgbackrest_info_other_cluster_name_backup_output = ( - f'[{{"db": [{{"system-id": "12345"}}], "name": "another-model.{self.charm.cluster_name}"}}]', + f'[{{"db": [{{"system-id": "12345"}}], "name": "another-model.{harness.charm.cluster_name}"}}]', None, ) same_instance_system_identifier_output = ( @@ -271,14 +273,14 @@ def test_can_use_s3_repository( pgbackrest_info_other_cluster_name_backup_output, same_instance_system_identifier_output, ] - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE), ) _update_config.assert_called_once() @@ -286,25 +288,25 @@ def test_can_use_s3_repository( _reload_patroni_configuration.assert_called_once() # Assert that the stanza name is not present in the unit relation data anymore. - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) # Test when the workload is not running. 
_update_config.reset_mock() _member_started.reset_mock() _reload_patroni_configuration.reset_mock() _member_started.return_value = False - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) _execute_command.side_effect = [ pgbackrest_info_same_cluster_backup_output, other_instance_system_identifier_output, ] - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE), ) _update_config.assert_called_once() @@ -312,56 +314,60 @@ def test_can_use_s3_repository( _reload_patroni_configuration.assert_not_called() # Assert that the stanza name is not present in the unit relation data anymore. - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) # Test when there is no backup from another cluster in the S3 repository. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": self.charm.backup.stanza_name}, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": harness.charm.backup.stanza_name}, ) _execute_command.side_effect = [ pgbackrest_info_same_cluster_backup_output, same_instance_system_identifier_output, ] - self.assertEqual( - self.charm.backup.can_use_s3_repository(), + tc.assertEqual( + harness.charm.backup.can_use_s3_repository(), (True, None), ) # Assert that the stanza name is still in the unit relation data. - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": self.charm.backup.stanza_name}, + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": harness.charm.backup.stanza_name}, ) - def test_construct_endpoint(self): - # Test with an AWS endpoint without region. - s3_parameters = {"endpoint": "https://s3.amazonaws.com", "region": ""} - self.assertEqual( - self.charm.backup._construct_endpoint(s3_parameters), "https://s3.amazonaws.com" - ) - # Test with an AWS endpoint with region. - s3_parameters["region"] = "us-east-1" - self.assertEqual( - self.charm.backup._construct_endpoint(s3_parameters), - "https://s3.us-east-1.amazonaws.com", - ) +def test_construct_endpoint(harness): + # Test with an AWS endpoint without region. + s3_parameters = {"endpoint": "https://s3.amazonaws.com", "region": ""} + tc.assertEqual( + harness.charm.backup._construct_endpoint(s3_parameters), "https://s3.amazonaws.com" + ) - # Test with another cloud endpoint. - s3_parameters["endpoint"] = "https://storage.googleapis.com" - self.assertEqual( - self.charm.backup._construct_endpoint(s3_parameters), "https://storage.googleapis.com" - ) + # Test with an AWS endpoint with region. + s3_parameters["region"] = "us-east-1" + tc.assertEqual( + harness.charm.backup._construct_endpoint(s3_parameters), + "https://s3.us-east-1.amazonaws.com", + ) - @patch("boto3.session.Session.resource") - @patch("charm.PostgreSQLBackups._retrieve_s3_parameters") - def test_create_bucket_if_not_exists(self, _retrieve_s3_parameters, _resource): + # Test with another cloud endpoint. 
+ s3_parameters["endpoint"] = "https://storage.googleapis.com" + tc.assertEqual( + harness.charm.backup._construct_endpoint(s3_parameters), "https://storage.googleapis.com" + ) + + +def test_create_bucket_if_not_exists(harness): + with ( + patch("boto3.session.Session.resource") as _resource, + patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters, + ): # Test when there are missing S3 parameters. _retrieve_s3_parameters.return_value = ([], ["bucket", "access-key", "secret-key"]) - self.charm.backup._create_bucket_if_not_exists() + harness.charm.backup._create_bucket_if_not_exists() _resource.assert_not_called() # Test when the charm fails to create a boto3 session. @@ -376,15 +382,15 @@ def test_create_bucket_if_not_exists(self, _retrieve_s3_parameters, _resource): [], ) _resource.side_effect = ValueError - with self.assertRaises(ValueError): - self.charm.backup._create_bucket_if_not_exists() + with tc.assertRaises(ValueError): + harness.charm.backup._create_bucket_if_not_exists() # Test when the bucket already exists. _resource.side_effect = None head_bucket = _resource.return_value.Bucket.return_value.meta.client.head_bucket create = _resource.return_value.Bucket.return_value.create wait_until_exists = _resource.return_value.Bucket.return_value.wait_until_exists - self.charm.backup._create_bucket_if_not_exists() + harness.charm.backup._create_bucket_if_not_exists() head_bucket.assert_called_once() create.assert_not_called() wait_until_exists.assert_not_called() @@ -395,7 +401,7 @@ def test_create_bucket_if_not_exists(self, _retrieve_s3_parameters, _resource): error_response={"Error": {"Code": 1, "message": "fake error"}}, operation_name="fake operation name", ) - self.charm.backup._create_bucket_if_not_exists() + harness.charm.backup._create_bucket_if_not_exists() head_bucket.assert_called_once() create.assert_called_once() wait_until_exists.assert_called_once() @@ -408,56 +414,60 @@ def test_create_bucket_if_not_exists(self, _retrieve_s3_parameters, _resource): error_response={"Error": {"Code": 1, "message": "fake error"}}, operation_name="fake operation name", ) - with self.assertRaises(ClientError): - self.charm.backup._create_bucket_if_not_exists() + with tc.assertRaises(ClientError): + harness.charm.backup._create_bucket_if_not_exists() head_bucket.assert_called_once() create.assert_called_once() wait_until_exists.assert_not_called() - @patch("ops.model.Container.exec") - def test_empty_data_files(self, _exec): + +def test_empty_data_files(harness): + with patch("ops.model.Container.exec") as _exec: # Test when the removal of the data files fails. command = "rm -r /var/lib/postgresql/data/pgdata".split() _exec.side_effect = ExecError(command=command, exit_code=1, stdout="", stderr="fake error") - with self.assertRaises(ExecError): - self.charm.backup._empty_data_files() + with tc.assertRaises(ExecError): + harness.charm.backup._empty_data_files() _exec.assert_called_once_with(command) # Test when data files are successfully removed. 
_exec.reset_mock() _exec.side_effect = None - self.charm.backup._empty_data_files() + harness.charm.backup._empty_data_files() _exec.assert_called_once_with(command) - @patch("charm.PostgresqlOperatorCharm.update_config") - def test_change_connectivity_to_database(self, _update_config): + +def test_change_connectivity_to_database(harness): + with patch("charm.PostgresqlOperatorCharm.update_config") as _update_config: + peer_rel_id = harness.model.get_relation(PEER).id # Ensure that there is no connectivity info in the unit relation databag. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"connectivity": ""}, ) # Test when connectivity should be turned on. - self.charm.backup._change_connectivity_to_database(True) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + harness.charm.backup._change_connectivity_to_database(True) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), {"connectivity": "on"}, ) _update_config.assert_called_once() # Test when connectivity should be turned off. _update_config.reset_mock() - self.charm.backup._change_connectivity_to_database(False) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + harness.charm.backup._change_connectivity_to_database(False) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), {"connectivity": "off"}, ) _update_config.assert_called_once() - @patch("ops.model.Container.exec") - def test_execute_command(self, _exec): + +def test_execute_command(harness): + with patch("ops.model.Container.exec") as _exec: # Test when the command fails. command = "rm -r /var/lib/postgresql/data/pgdata".split() _exec.side_effect = ChangeError( @@ -475,44 +485,46 @@ def test_execute_command(self, _exec): ), ) _exec.return_value.wait_output.return_value = ("fake stdout", "") - self.assertEqual(self.charm.backup._execute_command(command), (None, None)) + tc.assertEqual(harness.charm.backup._execute_command(command), (None, None)) _exec.assert_called_once_with(command, user="postgres", group="postgres", timeout=None) # Test when the command runs successfully. _exec.reset_mock() _exec.side_effect = None - self.assertEqual( - self.charm.backup._execute_command(command, timeout=5), ("fake stdout", "") + tc.assertEqual( + harness.charm.backup._execute_command(command, timeout=5), ("fake stdout", "") ) _exec.assert_called_once_with(command, user="postgres", group="postgres", timeout=5) - def test_format_backup_list(self): - # Test when there are no backups. - self.assertEqual( - self.charm.backup._format_backup_list([]), - """backup-id | backup-type | backup-status + +def test_format_backup_list(harness): + # Test when there are no backups. + tc.assertEqual( + harness.charm.backup._format_backup_list([]), + """backup-id | backup-type | backup-status ----------------------------------------------------""", - ) + ) - # Test when there are backups. - backup_list = [ - ("2023-01-01T09:00:00Z", "physical", "failed: fake error"), - ("2023-01-01T10:00:00Z", "physical", "finished"), - ] - self.assertEqual( - self.charm.backup._format_backup_list(backup_list), - """backup-id | backup-type | backup-status + # Test when there are backups. 
+ backup_list = [ + ("2023-01-01T09:00:00Z", "physical", "failed: fake error"), + ("2023-01-01T10:00:00Z", "physical", "finished"), + ] + tc.assertEqual( + harness.charm.backup._format_backup_list(backup_list), + """backup-id | backup-type | backup-status ---------------------------------------------------- 2023-01-01T09:00:00Z | physical | failed: fake error 2023-01-01T10:00:00Z | physical | finished""", - ) + ) + - @patch("charm.PostgreSQLBackups._execute_command") - def test_generate_backup_list_output(self, _execute_command): +def test_generate_backup_list_output(harness): + with patch("charm.PostgreSQLBackups._execute_command") as _execute_command: # Test when no backups are returned. _execute_command.return_value = ('[{"backup":[]}]', None) - self.assertEqual( - self.charm.backup._generate_backup_list_output(), + tc.assertEqual( + harness.charm.backup._generate_backup_list_output(), """backup-id | backup-type | backup-status ----------------------------------------------------""", ) @@ -522,20 +534,21 @@ def test_generate_backup_list_output(self, _execute_command): '[{"backup":[{"label":"20230101-090000F","error":"fake error"},{"label":"20230101-100000F","error":null}]}]', None, ) - self.assertEqual( - self.charm.backup._generate_backup_list_output(), + tc.assertEqual( + harness.charm.backup._generate_backup_list_output(), """backup-id | backup-type | backup-status ---------------------------------------------------- 2023-01-01T09:00:00Z | physical | failed: fake error 2023-01-01T10:00:00Z | physical | finished""", ) - @patch("charm.PostgreSQLBackups._execute_command") - def test_list_backups(self, _execute_command): + +def test_list_backups(harness): + with patch("charm.PostgreSQLBackups._execute_command") as _execute_command: # Test when no backups are available. _execute_command.return_value = ("[]", None) - self.assertEqual( - self.charm.backup._list_backups(show_failed=True), OrderedDict[str, str]() + tc.assertEqual( + harness.charm.backup._list_backups(show_failed=True), OrderedDict[str, str]() ) # Test when some backups are available. @@ -543,8 +556,8 @@ def test_list_backups(self, _execute_command): '[{"backup":[{"label":"20230101-090000F","error":"fake error"},{"label":"20230101-100000F","error":null}],"name":"test-stanza"}]', None, ) - self.assertEqual( - self.charm.backup._list_backups(show_failed=True), + tc.assertEqual( + harness.charm.backup._list_backups(show_failed=True), OrderedDict[str, str]([ ("2023-01-01T09:00:00Z", "test-stanza"), ("2023-01-01T10:00:00Z", "test-stanza"), @@ -552,43 +565,41 @@ def test_list_backups(self, _execute_command): ) # Test when some backups are available, but it's not desired to list failed backups. 
- self.assertEqual( - self.charm.backup._list_backups(show_failed=False), + tc.assertEqual( + harness.charm.backup._list_backups(show_failed=False), OrderedDict[str, str]([("2023-01-01T10:00:00Z", "test-stanza")]), ) - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("backups.wait_fixed", return_value=wait_fixed(0)) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgreSQLBackups._execute_command") - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - def test_initialise_stanza( - self, - _is_primary, - _execute_command, - _update_config, - _, - _member_started, - _reload_patroni_configuration, + +def test_initialise_stanza(harness): + with ( + patch("charm.Patroni.reload_patroni_configuration") as _reload_patroni_configuration, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("backups.wait_fixed", return_value=wait_fixed(0)), + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.PostgreSQLBackups._execute_command") as _execute_command, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, ): + peer_rel_id = harness.model.get_relation(PEER).id # Test when the unit is not the primary. _is_primary.return_value = False - self.charm.backup._initialise_stanza() + harness.charm.backup._initialise_stanza() _execute_command.assert_not_called() # Test when the unit is the primary, but it's in a blocked state # other than the ones can be solved by new S3 settings. _is_primary.return_value = True - self.charm.unit.status = BlockedStatus("fake blocked state") - self.charm.backup._initialise_stanza() + harness.charm.unit.status = BlockedStatus("fake blocked state") + harness.charm.backup._initialise_stanza() _execute_command.assert_not_called() # Test when the blocked state is any of the blocked stated that can be solved # by new S3 settings, but the stanza creation fails. stanza_creation_command = [ "pgbackrest", - f"--stanza={self.charm.backup.stanza_name}", + f"--stanza={harness.charm.backup.stanza_name}", "stanza-create", ] _execute_command.side_effect = ExecError( @@ -600,16 +611,16 @@ def test_initialise_stanza( FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE, ]: _execute_command.reset_mock() - self.charm.unit.status = BlockedStatus(blocked_state) - self.charm.backup._initialise_stanza() + harness.charm.unit.status = BlockedStatus(blocked_state) + harness.charm.backup._initialise_stanza() _execute_command.assert_called_once_with(stanza_creation_command) - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - self.assertEqual( - self.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + tc.assertEqual( + harness.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE ) # Assert there is no stanza name in the application relation databag. - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) # Test when the archiving is working correctly (pgBackRest check command succeeds) # and the unit is not the leader. 
@@ -618,68 +629,66 @@ def test_initialise_stanza( _member_started.reset_mock() _reload_patroni_configuration.reset_mock() _execute_command.side_effect = None - self.charm.backup._initialise_stanza() - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + harness.charm.backup._initialise_stanza() + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), { - "stanza": f"{self.charm.model.name}.patroni-postgresql-k8s", + "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", "init-pgbackrest": "True", }, ) - self.assertIsInstance(self.charm.unit.status, MaintenanceStatus) + tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) # Test when the unit is the leader. - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.harness.update_relation_data( - self.peer_rel_id, self.charm.unit.name, {"stanza": "", "init-pgbackrest": ""} + with harness.hooks_disabled(): + harness.set_leader() + harness.update_relation_data( + peer_rel_id, harness.charm.unit.name, {"stanza": "", "init-pgbackrest": ""} ) - self.charm.backup._initialise_stanza() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), + harness.charm.backup._initialise_stanza() + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), { - "stanza": f"{self.charm.model.name}.patroni-postgresql-k8s", + "stanza": f"{harness.charm.model.name}.patroni-postgresql-k8s", "init-pgbackrest": "True", }, ) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), {}, ) - self.assertIsInstance(self.charm.unit.status, MaintenanceStatus) - - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("backups.wait_fixed", return_value=wait_fixed(0)) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgreSQLBackups._execute_command") - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - def test_check_stanza( - self, - _is_primary, - _execute_command, - _update_config, - _, - _member_started, - _reload_patroni_configuration, + tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) + + +def test_check_stanza(harness): + with ( + patch("charm.Patroni.reload_patroni_configuration") as _reload_patroni_configuration, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("backups.wait_fixed", return_value=wait_fixed(0)), + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.PostgreSQLBackups._execute_command") as _execute_command, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, ): + peer_rel_id = harness.model.get_relation(PEER).id # Set peer data flag - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, 
{"stanza": "test-stanza", "init-pgbackrest": "True"}, ) # Test when the unit is not the primary. _is_primary.return_value = False - self.charm.backup.check_stanza() + harness.charm.backup.check_stanza() _execute_command.assert_not_called() # Set the unit as primary. @@ -687,7 +696,7 @@ def test_check_stanza( stanza_check_command = [ "pgbackrest", - f"--stanza={self.charm.backup.stanza_name}", + f"--stanza={harness.charm.backup.stanza_name}", "check", ] # Test when the archiving is not working correctly (pgBackRest check command fails). @@ -695,26 +704,28 @@ def test_check_stanza( command=stanza_check_command, exit_code=1, stdout="", stderr="fake error" ) _member_started.return_value = True - self.charm.backup.check_stanza() - self.assertEqual(_update_config.call_count, 2) - self.assertEqual(_member_started.call_count, 5) - self.assertEqual(_reload_patroni_configuration.call_count, 5) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - self.assertEqual(self.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE) + harness.charm.backup.check_stanza() + tc.assertEqual(_update_config.call_count, 2) + tc.assertEqual(_member_started.call_count, 5) + tc.assertEqual(_reload_patroni_configuration.call_count, 5) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + tc.assertEqual( + harness.charm.unit.status.message, FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE + ) # Test when the archiving is working correctly (pgBackRest check command succeeds) # and the unit is not the leader. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) _execute_command.reset_mock() @@ -722,188 +733,181 @@ def test_check_stanza( _member_started.reset_mock() _reload_patroni_configuration.reset_mock() _execute_command.side_effect = None - self.charm.backup.check_stanza() + harness.charm.backup.check_stanza() _update_config.assert_called_once() _member_started.assert_called_once() _reload_patroni_configuration.assert_called_once() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), {"stanza": "test-stanza"}, ) - self.assertIsInstance(self.charm.unit.status, ActiveStatus) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) # Test when the unit is the leader. 
- self.charm.unit.status = BlockedStatus("fake blocked state") - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, + harness.charm.unit.status = BlockedStatus("fake blocked state") + with harness.hooks_disabled(): + harness.set_leader() + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"init-pgbackrest": "True"}, ) - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"init-pgbackrest": "True"}, ) _update_config.reset_mock() _member_started.reset_mock() _reload_patroni_configuration.reset_mock() - self.charm.backup.check_stanza() + harness.charm.backup.check_stanza() _update_config.assert_called_once() _member_started.assert_called_once() _reload_patroni_configuration.assert_called_once() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), {"stanza": "test-stanza"}, ) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.unit), {"stanza": "test-stanza"}, ) - self.assertIsInstance(self.charm.unit.status, ActiveStatus) - - def test_coordinate_stanza_fields(self): - # Add a new unit to the relation. - new_unit_name = "postgresql-k8s/1" - new_unit = Unit(new_unit_name, None, self.harness.charm.app._backend, {}) - self.harness.add_relation_unit(self.peer_rel_id, new_unit_name) - - # Test when the stanza name is neither in the application relation databag nor in the unit relation databag. - self.charm.backup.coordinate_stanza_fields() - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, new_unit), {}) - - # Test when the stanza name is in the unit relation databag but the unit is not the leader. - stanza_name = f"{self.charm.model.name}.patroni-{self.charm.app.name}" - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, new_unit_name, {"stanza": stanza_name, "init-pgbackrest": "True"} - ) - self.charm.backup.coordinate_stanza_fields() - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, new_unit), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + + +def test_coordinate_stanza_fields(harness): + # Add a new unit to the relation. + peer_rel_id = harness.model.get_relation(PEER).id + new_unit_name = "postgresql-k8s/1" + new_unit = Unit(new_unit_name, None, harness.charm.app._backend, {}) + harness.add_relation_unit(peer_rel_id, new_unit_name) + + # Test when the stanza name is neither in the application relation databag nor in the unit relation databag. 
+ harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {}) + + # Test when the stanza name is in the unit relation databag but the unit is not the leader. + stanza_name = f"{harness.charm.model.name}.patroni-{harness.charm.app.name}" + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, new_unit_name, {"stanza": stanza_name, "init-pgbackrest": "True"} + ) + harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, new_unit), + {"stanza": stanza_name, "init-pgbackrest": "True"}, + ) - # Test when the unit is the leader. - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.charm.backup.coordinate_stanza_fields() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, new_unit), - {"stanza": stanza_name, "init-pgbackrest": "True"}, - ) - - # Test when the stanza was already checked in the primary non-leader unit. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, new_unit_name, {"init-pgbackrest": ""} - ) - self.charm.backup.coordinate_stanza_fields() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": stanza_name}, - ) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, new_unit), {"stanza": stanza_name} - ) + # Test when the unit is the leader. + with harness.hooks_disabled(): + harness.set_leader() + harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": stanza_name, "init-pgbackrest": "True"}, + ) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, new_unit), + {"stanza": stanza_name, "init-pgbackrest": "True"}, + ) - # Test when the "init-pgbackrest" flag was removed from the application relation databag - # and this is the unit that has the stanza name in the unit relation databag. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, self.charm.unit.name, {"stanza": stanza_name} - ) - self.charm.backup.coordinate_stanza_fields() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": stanza_name}, - ) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, new_unit), {"stanza": stanza_name} - ) + # Test when the stanza was already checked in the primary non-leader unit. 
+ with harness.hooks_disabled(): + harness.update_relation_data(peer_rel_id, new_unit_name, {"init-pgbackrest": ""}) + harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": stanza_name}, + ) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) + + # Test when the "init-pgbackrest" flag was removed from the application relation databag + # and this is the unit that has the stanza name in the unit relation databag. + with harness.hooks_disabled(): + harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"stanza": stanza_name}) + harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": stanza_name}, + ) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) + + # Test when the unit is not the leader. + with harness.hooks_disabled(): + harness.set_leader(False) + harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"stanza": stanza_name}) + harness.charm.backup.coordinate_stanza_fields() + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": stanza_name}, + ) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, new_unit), {"stanza": stanza_name}) - # Test when the unit is not the leader. - with self.harness.hooks_disabled(): - self.harness.set_leader(False) - self.harness.update_relation_data( - self.peer_rel_id, self.charm.unit.name, {"stanza": stanza_name} - ) - self.charm.backup.coordinate_stanza_fields() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), - {"stanza": stanza_name}, - ) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, new_unit), {"stanza": stanza_name} - ) - @patch("charm.PostgreSQLBackups._execute_command") - @patch("charm.PostgresqlOperatorCharm._get_hostname_from_unit") - @patch("charm.Patroni.get_primary") - def test_is_primary_pgbackrest_service_running( - self, _get_primary, _get_hostname_from_unit, _execute_command +def test_is_primary_pgbackrest_service_running(harness): + with ( + patch("charm.PostgreSQLBackups._execute_command") as _execute_command, + patch("charm.PostgresqlOperatorCharm._get_hostname_from_unit") as _get_hostname_from_unit, + patch("charm.Patroni.get_primary") as _get_primary, ): # Test when the charm fails to get the current primary. _get_primary.side_effect = RetryError(last_attempt=1) - self.assertEqual(self.charm.backup._is_primary_pgbackrest_service_running, False) + tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) _execute_command.assert_not_called() # Test when the primary was not elected yet. _get_primary.side_effect = None _get_primary.return_value = None - self.assertEqual(self.charm.backup._is_primary_pgbackrest_service_running, False) + tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) _execute_command.assert_not_called() # Test when the pgBackRest fails to contact the primary server. 
- _get_primary.return_value = f"{self.charm.app.name}/1" + _get_primary.return_value = f"{harness.charm.app.name}/1" _execute_command.side_effect = ExecError( command="fake command".split(), exit_code=1, stdout="", stderr="fake error" ) - self.assertEqual(self.charm.backup._is_primary_pgbackrest_service_running, False) + tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, False) _execute_command.assert_called_once() # Test when the pgBackRest succeeds on contacting the primary server. _execute_command.reset_mock() _execute_command.side_effect = None - self.assertEqual(self.charm.backup._is_primary_pgbackrest_service_running, True) + tc.assertEqual(harness.charm.backup._is_primary_pgbackrest_service_running, True) _execute_command.assert_called_once() - @patch("charm.PostgreSQLBackups._initialise_stanza") - @patch("charm.PostgreSQLBackups.can_use_s3_repository") - @patch("charm.PostgreSQLBackups._create_bucket_if_not_exists") - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - @patch("charm.PostgreSQLBackups._render_pgbackrest_conf_file") - @patch("ops.framework.EventBase.defer") - def test_on_s3_credential_changed( - self, - _defer, - _render_pgbackrest_conf_file, - _is_primary, - _create_bucket_if_not_exists, - _can_use_s3_repository, - _initialise_stanza, + +def test_on_s3_credential_changed(harness): + with ( + patch("charm.PostgreSQLBackups._initialise_stanza") as _initialise_stanza, + patch("charm.PostgreSQLBackups.can_use_s3_repository") as _can_use_s3_repository, + patch( + "charm.PostgreSQLBackups._create_bucket_if_not_exists" + ) as _create_bucket_if_not_exists, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, + patch( + "charm.PostgreSQLBackups._render_pgbackrest_conf_file" + ) as _render_pgbackrest_conf_file, + patch("ops.framework.EventBase.defer") as _defer, ): + peer_rel_id = harness.model.get_relation(PEER).id # Test when the cluster was not initialised yet. - self.relate_to_s3_integrator() - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + s3_rel_id = harness.add_relation(S3_PARAMETERS_RELATION, "s3-integrator") + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) _defer.assert_called_once() _render_pgbackrest_conf_file.assert_not_called() @@ -914,15 +918,15 @@ def test_on_s3_credential_changed( # Test when the cluster is already initialised, but the charm fails to render # the pgBackRest configuration file due to missing S3 parameters. 
_defer.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) _render_pgbackrest_conf_file.return_value = False - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) _defer.assert_not_called() _render_pgbackrest_conf_file.assert_called_once() @@ -931,23 +935,23 @@ def test_on_s3_credential_changed( _initialise_stanza.assert_not_called() # Test that followers will not initialise the bucket - self.charm.unit.status = ActiveStatus() + harness.charm.unit.status = ActiveStatus() _render_pgbackrest_conf_file.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) _render_pgbackrest_conf_file.return_value = True _is_primary.return_value = False - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) _render_pgbackrest_conf_file.assert_called_once() _create_bucket_if_not_exists.assert_not_called() - self.assertIsInstance(self.charm.unit.status, ActiveStatus) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) _can_use_s3_repository.assert_not_called() _initialise_stanza.assert_not_called() @@ -964,14 +968,14 @@ def test_on_s3_credential_changed( _render_pgbackrest_conf_file.reset_mock() _create_bucket_if_not_exists.reset_mock() _create_bucket_if_not_exists.side_effect = error - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) _render_pgbackrest_conf_file.assert_called_once() _create_bucket_if_not_exists.assert_called_once() - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - self.assertEqual( - self.charm.unit.status.message, FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + tc.assertEqual( + harness.charm.unit.status.message, FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE ) _can_use_s3_repository.assert_not_called() _initialise_stanza.assert_not_called() @@ -980,11 +984,11 @@ def test_on_s3_credential_changed( _create_bucket_if_not_exists.reset_mock() _create_bucket_if_not_exists.side_effect = None _can_use_s3_repository.return_value = (False, "fake validation message") - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - self.assertEqual(self.charm.unit.status.message, "fake validation message") + 
tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + tc.assertEqual(harness.charm.unit.status.message, "fake validation message") _create_bucket_if_not_exists.assert_called_once() _can_use_s3_repository.assert_called_once() _initialise_stanza.assert_not_called() @@ -992,81 +996,78 @@ def test_on_s3_credential_changed( # Test when the stanza can be initialised and the pgBackRest service can start. _can_use_s3_repository.reset_mock() _can_use_s3_repository.return_value = (True, None) - self.charm.backup.s3_client.on.credentials_changed.emit( - relation=self.harness.model.get_relation(S3_PARAMETERS_RELATION, self.s3_rel_id) + harness.charm.backup.s3_client.on.credentials_changed.emit( + relation=harness.model.get_relation(S3_PARAMETERS_RELATION, s3_rel_id) ) _can_use_s3_repository.assert_called_once() _initialise_stanza.assert_called_once() - def test_on_s3_credential_gone(self): - # Test that unrelated blocks will remain - self.charm.unit.status = BlockedStatus("test block") - self.charm.backup._on_s3_credential_gone(None) - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - - # Test that s3 related blocks will be cleared - self.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) - self.charm.backup._on_s3_credential_gone(None) - self.assertIsInstance(self.charm.unit.status, ActiveStatus) - - # Test removal of relation data when the unit is not the leader. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": "test-stanza", "init-pgbackrest": "True"}, - ) - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.app.name, - {"stanza": "test-stanza", "init-pgbackrest": "True"}, - ) - self.charm.backup._on_s3_credential_gone(None) - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), + +def test_on_s3_credential_gone(harness): + peer_rel_id = harness.model.get_relation(PEER).id + # Test that unrelated blocks will remain + harness.charm.unit.status = BlockedStatus("test block") + harness.charm.backup._on_s3_credential_gone(None) + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + + # Test that s3 related blocks will be cleared + harness.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) + harness.charm.backup._on_s3_credential_gone(None) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) + + # Test removal of relation data when the unit is not the leader. + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"stanza": "test-stanza", "init-pgbackrest": "True"}, ) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - - # Test removal of relation data when the unit is the leader. 
- with self.harness.hooks_disabled(): - self.harness.set_leader() - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, - {"stanza": "test-stanza", "init-pgbackrest": "True"}, - ) - self.charm.backup._on_s3_credential_gone(None) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.unit), {}) - - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgreSQLBackups._change_connectivity_to_database") - @patch("charm.PostgreSQLBackups._list_backups") - @patch("charm.PostgreSQLBackups._execute_command") - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - @patch("charm.PostgreSQLBackups._upload_content_to_s3") - @patch("backups.datetime") - @patch("ops.JujuVersion.from_environ") - @patch("charm.PostgreSQLBackups._retrieve_s3_parameters") - @patch("charm.PostgreSQLBackups._can_unit_perform_backup") - def test_on_create_backup_action( - self, - _can_unit_perform_backup, - _retrieve_s3_parameters, - _from_environ, - _datetime, - _upload_content_to_s3, - _is_primary, - _execute_command, - _list_backups, - _change_connectivity_to_database, - _update_config, + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"stanza": "test-stanza", "init-pgbackrest": "True"}, + ) + harness.charm.backup._on_s3_credential_gone(None) + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), + {"stanza": "test-stanza", "init-pgbackrest": "True"}, + ) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + + # Test removal of relation data when the unit is the leader. + with harness.hooks_disabled(): + harness.set_leader() + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, + {"stanza": "test-stanza", "init-pgbackrest": "True"}, + ) + harness.charm.backup._on_s3_credential_gone(None) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.unit), {}) + + +def test_on_create_backup_action(harness): + with ( + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch( + "charm.PostgreSQLBackups._change_connectivity_to_database" + ) as _change_connectivity_to_database, + patch("charm.PostgreSQLBackups._list_backups") as _list_backups, + patch("charm.PostgreSQLBackups._execute_command") as _execute_command, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, + patch("charm.PostgreSQLBackups._upload_content_to_s3") as _upload_content_to_s3, + patch("backups.datetime") as _datetime, + patch("ops.JujuVersion.from_environ") as _from_environ, + patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters, + patch("charm.PostgreSQLBackups._can_unit_perform_backup") as _can_unit_perform_backup, ): # Test when the unit cannot perform a backup. 
mock_event = MagicMock() _can_unit_perform_backup.return_value = (False, "fake validation message") - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) mock_event.fail.assert_called_once() mock_event.set_results.assert_not_called() @@ -1089,15 +1090,15 @@ def test_on_create_backup_action( _from_environ.return_value = "test-juju-version" _upload_content_to_s3.return_value = False expected_metadata = f"""Date Backup Requested: 2023-01-01T09:00:00Z -Model Name: {self.charm.model.name} -Application Name: {self.charm.model.app.name} -Unit Name: {self.charm.unit.name} +Model Name: {harness.charm.model.name} +Application Name: {harness.charm.model.app.name} +Unit Name: {harness.charm.unit.name} Juju Version: test-juju-version """ - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) _upload_content_to_s3.assert_called_once_with( expected_metadata, - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/latest", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/latest", mock_s3_parameters, ) mock_event.fail.assert_called_once() @@ -1110,7 +1111,7 @@ def test_on_create_backup_action( _execute_command.side_effect = ExecError( command="fake command".split(), exit_code=1, stdout="", stderr="fake error" ) - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) update_config_calls = [ call(is_creating_backup=True), call(is_creating_backup=False), @@ -1125,18 +1126,18 @@ def test_on_create_backup_action( _upload_content_to_s3.side_effect = [True, False] _execute_command.side_effect = None _execute_command.return_value = "fake stdout", "fake stderr" - _list_backups.return_value = {"2023-01-01T09:00:00Z": self.charm.backup.stanza_name} + _list_backups.return_value = {"2023-01-01T09:00:00Z": harness.charm.backup.stanza_name} _update_config.reset_mock() - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) _upload_content_to_s3.assert_has_calls([ call( expected_metadata, - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/latest", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/latest", mock_s3_parameters, ), call( "Stdout:\nfake stdout\n\nStderr:\nfake stderr\n", - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", mock_s3_parameters, ), ]) @@ -1150,16 +1151,16 @@ def test_on_create_backup_action( _upload_content_to_s3.side_effect = None _upload_content_to_s3.return_value = True _update_config.reset_mock() - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) _upload_content_to_s3.assert_has_calls([ call( expected_metadata, - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/latest", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/latest", mock_s3_parameters, ), call( "Stdout:\nfake stdout\n\nStderr:\nfake stderr\n", - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", mock_s3_parameters, ), ]) @@ -1172,30 +1173,35 @@ def 
test_on_create_backup_action( mock_event.reset_mock() _upload_content_to_s3.reset_mock() _is_primary.return_value = False - self.charm.backup._on_create_backup_action(mock_event) + harness.charm.backup._on_create_backup_action(mock_event) _upload_content_to_s3.assert_has_calls([ call( expected_metadata, - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/latest", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/latest", mock_s3_parameters, ), call( "Stdout:\nfake stdout\n\nStderr:\nfake stderr\n", - f"test-path/backup/{self.charm.model.name}.{self.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", + f"test-path/backup/{harness.charm.model.name}.{harness.charm.cluster_name}/2023-01-01T09:00:00Z/backup.log", mock_s3_parameters, ), ]) - self.assertEqual(_change_connectivity_to_database.call_count, 2) + tc.assertEqual(_change_connectivity_to_database.call_count, 2) mock_event.fail.assert_not_called() mock_event.set_results.assert_called_once_with({"backup-status": "backup created"}) - @patch("charm.PostgreSQLBackups._generate_backup_list_output") - @patch("charm.PostgreSQLBackups._are_backup_settings_ok") - def test_on_list_backups_action(self, _are_backup_settings_ok, _generate_backup_list_output): + +def test_on_list_backups_action(harness): + with ( + patch( + "charm.PostgreSQLBackups._generate_backup_list_output" + ) as _generate_backup_list_output, + patch("charm.PostgreSQLBackups._are_backup_settings_ok") as _are_backup_settings_ok, + ): # Test when not all backup settings are ok. mock_event = MagicMock() _are_backup_settings_ok.return_value = (False, "fake validation message") - self.charm.backup._on_list_backups_action(mock_event) + harness.charm.backup._on_list_backups_action(mock_event) mock_event.fail.assert_called_once() _generate_backup_list_output.assert_not_called() mock_event.set_results.assert_not_called() @@ -1206,7 +1212,7 @@ def test_on_list_backups_action(self, _are_backup_settings_ok, _generate_backup_ _generate_backup_list_output.side_effect = ExecError( command="fake command".split(), exit_code=1, stdout="", stderr="fake error" ) - self.charm.backup._on_list_backups_action(mock_event) + harness.charm.backup._on_list_backups_action(mock_event) _generate_backup_list_output.assert_called_once() mock_event.fail.assert_called_once() mock_event.set_results.assert_not_called() @@ -1220,7 +1226,7 @@ def test_on_list_backups_action(self, _are_backup_settings_ok, _generate_backup_ ---------------------------------------------------- 2023-01-01T09:00:00Z | physical | failed: fake error 2023-01-01T10:00:00Z | physical | finished""" - self.charm.backup._on_list_backups_action(mock_event) + harness.charm.backup._on_list_backups_action(mock_event) _generate_backup_list_output.assert_called_once() mock_event.set_results.assert_called_once_with({ "backups": """backup-id | backup-type | backup-status @@ -1230,32 +1236,25 @@ def test_on_list_backups_action(self, _are_backup_settings_ok, _generate_backup_ }) mock_event.fail.assert_not_called() - @patch("ops.model.Container.start") - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgresqlOperatorCharm._create_pgdata") - @patch("charm.PostgreSQLBackups._empty_data_files") - @patch("charm.PostgreSQLBackups._restart_database") - @patch("lightkube.Client.delete") - @patch("ops.model.Container.stop") - @patch("charm.PostgreSQLBackups._list_backups") - @patch("charm.PostgreSQLBackups._pre_restore_checks") - def test_on_restore_action( - self, - _pre_restore_checks, - 
_list_backups, - _stop, - _delete, - _restart_database, - _empty_data_files, - _create_pgdata, - _update_config, - _start, + +def test_on_restore_action(harness): + with ( + patch("ops.model.Container.start") as _start, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.PostgresqlOperatorCharm._create_pgdata") as _create_pgdata, + patch("charm.PostgreSQLBackups._empty_data_files") as _empty_data_files, + patch("charm.PostgreSQLBackups._restart_database") as _restart_database, + patch("lightkube.Client.delete") as _delete, + patch("ops.model.Container.stop") as _stop, + patch("charm.PostgreSQLBackups._list_backups") as _list_backups, + patch("charm.PostgreSQLBackups._pre_restore_checks") as _pre_restore_checks, ): + peer_rel_id = harness.model.get_relation(PEER).id # Test when pre restore checks fail. mock_event = MagicMock() _pre_restore_checks.return_value = False - self.charm.unit.status = ActiveStatus() - self.charm.backup._on_restore_action(mock_event) + harness.charm.unit.status = ActiveStatus() + harness.charm.backup._on_restore_action(mock_event) _list_backups.assert_not_called() _stop.assert_not_called() _delete.assert_not_called() @@ -1266,14 +1265,14 @@ def test_on_restore_action( _start.assert_not_called() mock_event.fail.assert_not_called() mock_event.set_results.assert_not_called() - self.assertNotIsInstance(self.charm.unit.status, MaintenanceStatus) + tc.assertNotIsInstance(harness.charm.unit.status, MaintenanceStatus) # Test when the user provides an invalid backup id. mock_event.params = {"backup-id": "2023-01-01T10:00:00Z"} _pre_restore_checks.return_value = True - _list_backups.return_value = {"2023-01-01T09:00:00Z": self.charm.backup.stanza_name} - self.charm.unit.status = ActiveStatus() - self.charm.backup._on_restore_action(mock_event) + _list_backups.return_value = {"2023-01-01T09:00:00Z": harness.charm.backup.stanza_name} + harness.charm.unit.status = ActiveStatus() + harness.charm.backup._on_restore_action(mock_event) _list_backups.assert_called_once_with(show_failed=False) mock_event.fail.assert_called_once() _stop.assert_not_called() @@ -1284,7 +1283,7 @@ def test_on_restore_action( _update_config.assert_not_called() _start.assert_not_called() mock_event.set_results.assert_not_called() - self.assertNotIsInstance(self.charm.unit.status, MaintenanceStatus) + tc.assertNotIsInstance(harness.charm.unit.status, MaintenanceStatus) # Test when the charm fails to stop the workload. 
mock_event.reset_mock() @@ -1303,7 +1302,7 @@ def test_on_restore_action( datetime.datetime.now(), ), ) - self.charm.backup._on_restore_action(mock_event) + harness.charm.backup._on_restore_action(mock_event) _stop.assert_called_once_with("postgresql") mock_event.fail.assert_called_once() _delete.assert_not_called() @@ -1319,8 +1318,8 @@ def test_on_restore_action( mock_event.params = {"backup-id": "2023-01-01T09:00:00Z"} _stop.side_effect = None _delete.side_effect = [None, _FakeApiError] - self.charm.backup._on_restore_action(mock_event) - self.assertEqual(_delete.call_count, 2) + harness.charm.backup._on_restore_action(mock_event) + tc.assertEqual(_delete.call_count, 2) mock_event.fail.assert_called_once() _restart_database.assert_called_once() _empty_data_files.assert_not_called() @@ -1336,7 +1335,7 @@ def test_on_restore_action( _empty_data_files.side_effect = ExecError( command="fake command".split(), exit_code=1, stdout="", stderr="fake error" ) - self.charm.backup._on_restore_action(mock_event) + harness.charm.backup._on_restore_action(mock_event) _empty_data_files.assert_called_once() mock_event.fail.assert_called_once() _restart_database.assert_called_once() @@ -1349,14 +1348,14 @@ def test_on_restore_action( mock_event.reset_mock() _restart_database.reset_mock() _empty_data_files.side_effect = None - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) - self.charm.backup._on_restore_action(mock_event) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) + harness.charm.backup._on_restore_action(mock_event) _restart_database.assert_not_called() - self.assertEqual( - self.harness.get_relation_data(self.peer_rel_id, self.charm.app), + tc.assertEqual( + harness.get_relation_data(peer_rel_id, harness.charm.app), { "restoring-backup": "20230101-090000F", - "restore-stanza": f"{self.charm.model.name}.{self.charm.cluster_name}", + "restore-stanza": f"{harness.charm.model.name}.{harness.charm.cluster_name}", }, ) _create_pgdata.assert_called_once() @@ -1365,59 +1364,65 @@ def test_on_restore_action( mock_event.fail.assert_not_called() mock_event.set_results.assert_called_once_with({"restore-status": "restore started"}) - @patch("ops.model.Application.planned_units") - @patch("charm.PostgreSQLBackups._are_backup_settings_ok") - def test_pre_restore_checks(self, _are_backup_settings_ok, _planned_units): + +def test_pre_restore_checks(harness): + with ( + patch("ops.model.Application.planned_units") as _planned_units, + patch("charm.PostgreSQLBackups._are_backup_settings_ok") as _are_backup_settings_ok, + ): # Test when S3 parameters are not ok. mock_event = MagicMock() _are_backup_settings_ok.return_value = (False, "fake error message") - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when no backup id is provided. mock_event.reset_mock() _are_backup_settings_ok.return_value = (True, None) - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when the workload container is not accessible yet. 
mock_event.reset_mock() mock_event.params = {"backup-id": "2023-01-01T09:00:00Z"} - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when the unit is in a blocked state that is not recoverable by changing # S3 parameters. mock_event.reset_mock() - self.harness.set_can_connect("postgresql", True) - self.charm.unit.status = BlockedStatus("fake blocked state") - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + harness.set_can_connect("postgresql", True) + harness.charm.unit.status = BlockedStatus("fake blocked state") + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when the unit is in a blocked state that is recoverable by changing S3 parameters, # but the cluster has more than one unit. mock_event.reset_mock() - self.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) + harness.charm.unit.status = BlockedStatus(ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE) _planned_units.return_value = 2 - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when the cluster has only one unit, but it's not the leader yet. mock_event.reset_mock() _planned_units.return_value = 1 - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), False) + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), False) mock_event.fail.assert_called_once() # Test when everything is ok to run a restore. mock_event.reset_mock() - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.assertEqual(self.charm.backup._pre_restore_checks(mock_event), True) + with harness.hooks_disabled(): + harness.set_leader() + tc.assertEqual(harness.charm.backup._pre_restore_checks(mock_event), True) mock_event.fail.assert_not_called() - @patch("ops.model.Container.push") - @patch("charm.PostgreSQLBackups._retrieve_s3_parameters") - def test_render_pgbackrest_conf_file(self, _retrieve_s3_parameters, _push): + +def test_render_pgbackrest_conf_file(harness): + with ( + patch("ops.model.Container.push") as _push, + patch("charm.PostgreSQLBackups._retrieve_s3_parameters") as _retrieve_s3_parameters, + ): # Set up a mock for the `open` method, set returned data to postgresql.conf template. with open("templates/pgbackrest.conf.j2", "r") as f: mock = mock_open(read_data=f.read()) @@ -1428,7 +1433,7 @@ def test_render_pgbackrest_conf_file(self, _retrieve_s3_parameters, _push): # Patch the `open` method with our mock. 
with patch("builtins.open", mock, create=True): # Call the method - self.charm.backup._render_pgbackrest_conf_file() + harness.charm.backup._render_pgbackrest_conf_file() mock.assert_not_called() _push.assert_not_called() @@ -1451,8 +1456,9 @@ def test_render_pgbackrest_conf_file(self, _retrieve_s3_parameters, _push): with open("templates/pgbackrest.conf.j2") as file: template = Template(file.read()) expected_content = template.render( - enable_tls=self.charm.is_tls_enabled and len(self.charm.peer_members_endpoints) > 0, - peer_endpoints=self.charm.peer_members_endpoints, + enable_tls=harness.charm.is_tls_enabled + and len(harness.charm.peer_members_endpoints) > 0, + peer_endpoints=harness.charm.peer_members_endpoints, path="test-path/", region="us-east-1", endpoint="https://storage.googleapis.com", @@ -1460,18 +1466,18 @@ def test_render_pgbackrest_conf_file(self, _retrieve_s3_parameters, _push): s3_uri_style="path", access_key="test-access-key", secret_key="test-secret-key", - stanza=self.charm.backup.stanza_name, - storage_path=self.charm._storage_path, + stanza=harness.charm.backup.stanza_name, + storage_path=harness.charm._storage_path, user="backup", ) # Patch the `open` method with our mock. with patch("builtins.open", mock, create=True): # Call the method - self.charm.backup._render_pgbackrest_conf_file() + harness.charm.backup._render_pgbackrest_conf_file() # Check the template is opened read-only in the call to open. - self.assertEqual(mock.call_args_list[0][0], ("templates/pgbackrest.conf.j2", "r")) + tc.assertEqual(mock.call_args_list[0][0], ("templates/pgbackrest.conf.j2", "r")) # Ensure the correct rendered template is sent to _render_file method. _push.assert_called_once_with( @@ -1481,29 +1487,38 @@ def test_render_pgbackrest_conf_file(self, _retrieve_s3_parameters, _push): group="postgres", ) - @patch("ops.model.Container.start") - @patch("charm.PostgresqlOperatorCharm.update_config") - def test_restart_database(self, _update_config, _start): - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.charm.unit.name, + +def test_restart_database(harness): + with ( + patch("ops.model.Container.start") as _start, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + ): + peer_rel_id = harness.model.get_relation(PEER).id + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.unit.name, {"restoring-backup": "2023-01-01T09:00:00Z"}, ) - self.charm.backup._restart_database() + harness.charm.backup._restart_database() # Assert that the backup id is not in the application relation databag anymore. - self.assertEqual(self.harness.get_relation_data(self.peer_rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(peer_rel_id, harness.charm.app), {}) _update_config.assert_called_once() _start.assert_called_once_with("postgresql") - @patch("charms.data_platform_libs.v0.s3.S3Requirer.get_s3_connection_info") - def test_retrieve_s3_parameters(self, _get_s3_connection_info): + +def test_retrieve_s3_parameters( + harness, +): + with patch( + "charms.data_platform_libs.v0.s3.S3Requirer.get_s3_connection_info" + ) as _get_s3_connection_info: # Test when there are missing S3 parameters. 
_get_s3_connection_info.return_value = {} - self.assertEqual( - self.charm.backup._retrieve_s3_parameters(), + tc.assertEqual( + harness.charm.backup._retrieve_s3_parameters(), ({}, ["bucket", "access-key", "secret-key"]), ) @@ -1513,8 +1528,8 @@ def test_retrieve_s3_parameters(self, _get_s3_connection_info): "access-key": "test-access-key", "secret-key": "test-secret-key", } - self.assertEqual( - self.charm.backup._retrieve_s3_parameters(), + tc.assertEqual( + harness.charm.backup._retrieve_s3_parameters(), ( { "access-key": "test-access-key", @@ -1539,8 +1554,8 @@ def test_retrieve_s3_parameters(self, _get_s3_connection_info): "region": " us-east-1 ", "s3-uri-style": " path ", } - self.assertEqual( - self.charm.backup._retrieve_s3_parameters(), + tc.assertEqual( + harness.charm.backup._retrieve_s3_parameters(), ( { "access-key": "test-access-key", @@ -1555,31 +1570,33 @@ def test_retrieve_s3_parameters(self, _get_s3_connection_info): ), ) - @patch( - "charm.PostgreSQLBackups._is_primary_pgbackrest_service_running", new_callable=PropertyMock - ) - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - @patch("ops.model.Container.restart") - @patch("ops.model.Container.stop") - @patch("charm.PostgresqlOperatorCharm.peer_members_endpoints", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock) - @patch("charm.PostgreSQLBackups._render_pgbackrest_conf_file") - @patch("charm.PostgreSQLBackups._are_backup_settings_ok") - def test_start_stop_pgbackrest_service( - self, - _are_backup_settings_ok, - _render_pgbackrest_conf_file, - _is_tls_enabled, - _peer_members_endpoints, - _stop, - _restart, - _is_primary, - _is_primary_pgbackrest_service_running, + +def test_start_stop_pgbackrest_service(harness): + with ( + patch( + "charm.PostgreSQLBackups._is_primary_pgbackrest_service_running", + new_callable=PropertyMock, + ) as _is_primary_pgbackrest_service_running, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, + patch("ops.model.Container.restart") as _restart, + patch("ops.model.Container.stop") as _stop, + patch( + "charm.PostgresqlOperatorCharm.peer_members_endpoints", new_callable=PropertyMock + ) as _peer_members_endpoints, + patch( + "charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock + ) as _is_tls_enabled, + patch( + "charm.PostgreSQLBackups._render_pgbackrest_conf_file" + ) as _render_pgbackrest_conf_file, + patch("charm.PostgreSQLBackups._are_backup_settings_ok") as _are_backup_settings_ok, ): # Test when S3 parameters are not ok (no operation, but returns success). _are_backup_settings_ok.return_value = (False, "fake error message") - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), True, ) _stop.assert_not_called() @@ -1588,8 +1605,8 @@ def test_start_stop_pgbackrest_service( # Test when it was not possible to render the pgBackRest configuration file. _are_backup_settings_ok.return_value = (True, None) _render_pgbackrest_conf_file.return_value = False - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), False, ) _stop.assert_not_called() @@ -1598,8 +1615,8 @@ def test_start_stop_pgbackrest_service( # Test when TLS is not enabled (should stop the service). 
_render_pgbackrest_conf_file.return_value = True _is_tls_enabled.return_value = False - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), True, ) _stop.assert_called_once() @@ -1609,8 +1626,8 @@ def test_start_stop_pgbackrest_service( _stop.reset_mock() _is_tls_enabled.return_value = True _peer_members_endpoints.return_value = [] - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), True, ) _stop.assert_called_once() @@ -1621,8 +1638,8 @@ def test_start_stop_pgbackrest_service( _peer_members_endpoints.return_value = ["fake-member-endpoint"] _is_primary.return_value = False _is_primary_pgbackrest_service_running.return_value = False - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), False, ) _stop.assert_not_called() @@ -1630,8 +1647,8 @@ def test_start_stop_pgbackrest_service( # Test when the service has already started in the primary. _is_primary_pgbackrest_service_running.return_value = True - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), True, ) _stop.assert_not_called() @@ -1641,17 +1658,20 @@ def test_start_stop_pgbackrest_service( _restart.reset_mock() _is_primary.return_value = True _is_primary_pgbackrest_service_running.return_value = False - self.assertEqual( - self.charm.backup.start_stop_pgbackrest_service(), + tc.assertEqual( + harness.charm.backup.start_stop_pgbackrest_service(), True, ) _stop.assert_not_called() _restart.assert_called_once() - @patch("tempfile.NamedTemporaryFile") - @patch("charm.PostgreSQLBackups._construct_endpoint") - @patch("boto3.session.Session.resource") - def test_upload_content_to_s3(self, _resource, _construct_endpoint, _named_temporary_file): + +def test_upload_content_to_s3(harness): + with ( + patch("tempfile.NamedTemporaryFile") as _named_temporary_file, + patch("charm.PostgreSQLBackups._construct_endpoint") as _construct_endpoint, + patch("boto3.session.Session.resource") as _resource, + ): # Set some parameters. content = "test-content" s3_path = "test-file." 
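(Editor's aside, not part of the diff.) The grouped `with (...)` blocks that replace the stacked `@patch` decorators rely on parenthesized context managers, which Python supports officially from 3.10 onward; on older interpreters, contextlib.ExitStack is the usual fallback. A minimal, self-contained sketch of the grouped form, with purely illustrative names (none of this is code from the repository):

from unittest.mock import patch


class Uploader:
    """Stand-in object; not code from this repository."""

    def connect(self) -> str:
        return "real-endpoint"

    def upload(self, content: str) -> bool:
        return False


def test_grouped_patches():
    uploader = Uploader()
    # Several patches grouped in one parenthesized with-statement (Python 3.10+),
    # mirroring the shape used throughout the converted tests.
    with (
        patch.object(Uploader, "connect", return_value="fake-endpoint") as _connect,
        patch.object(Uploader, "upload", return_value=True) as _upload,
    ):
        assert uploader.connect() == "fake-endpoint"
        assert uploader.upload("test-content") is True
        _connect.assert_called_once()
        _upload.assert_called_once_with("test-content")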
@@ -1669,8 +1689,8 @@ def test_upload_content_to_s3(self, _resource, _construct_endpoint, _named_tempo _resource.side_effect = ValueError _construct_endpoint.return_value = "https://s3.us-east-1.amazonaws.com" _named_temporary_file.return_value.__enter__.return_value.name = "/tmp/test-file" - self.assertEqual( - self.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), + tc.assertEqual( + harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), False, ) _resource.assert_called_once_with("s3", endpoint_url="https://s3.us-east-1.amazonaws.com") @@ -1680,8 +1700,8 @@ def test_upload_content_to_s3(self, _resource, _construct_endpoint, _named_tempo _resource.reset_mock() _resource.side_effect = None upload_file.side_effect = S3UploadFailedError - self.assertEqual( - self.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), + tc.assertEqual( + harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), False, ) _resource.assert_called_once_with("s3", endpoint_url="https://s3.us-east-1.amazonaws.com") @@ -1693,8 +1713,8 @@ def test_upload_content_to_s3(self, _resource, _construct_endpoint, _named_tempo _named_temporary_file.reset_mock() upload_file.reset_mock() upload_file.side_effect = None - self.assertEqual( - self.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), + tc.assertEqual( + harness.charm.backup._upload_content_to_s3(content, s3_path, s3_parameters), True, ) _resource.assert_called_once_with("s3", endpoint_url="https://s3.us-east-1.amazonaws.com") diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index fbae133dc0..bcdd92e56c 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -3,8 +3,8 @@ import itertools import json import logging -import unittest from datetime import datetime +from unittest import TestCase from unittest.mock import MagicMock, Mock, PropertyMock, patch import pytest @@ -19,7 +19,6 @@ ) from ops.pebble import Change, ChangeError, ChangeID, ServiceStatus from ops.testing import Harness -from parameterized import parameterized from requests import ConnectionError from tenacity import RetryError, wait_fixed @@ -28,48 +27,45 @@ from tests.helpers import patch_network_get from tests.unit.helpers import _FakeApiError - -class TestCharm(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - @patch_network_get(private_address="1.1.1.1") - def setUp(self): - self._peer_relation = PEER - self._postgresql_container = "postgresql" - self._postgresql_service = "postgresql" - self._metrics_service = "metrics_server" - self.pgbackrest_server_service = "pgbackrest server" - - self.harness = Harness(PostgresqlOperatorCharm) - self.harness.handle_exec("postgresql", ["locale", "-a"], result="C") - self.addCleanup(self.harness.cleanup) - self.rel_id = self.harness.add_relation(self._peer_relation, "postgresql-k8s") - self.harness.begin() - self.charm = self.harness.charm - self._cluster_name = f"patroni-{self.charm.app.name}" - self._context = { - "namespace": self.harness.model.name, - "app_name": self.harness.model.app.name, - } - - @pytest.fixture - def use_caplog(self, caplog): - self._caplog = caplog - - @patch("charm.PostgresqlOperatorCharm._add_members") - @patch("charm.Client") - @patch("charm.new_password", return_value="sekr1t") - @patch("charm.PostgresqlOperatorCharm.get_secret", return_value=None) - @patch("charm.PostgresqlOperatorCharm.set_secret") - @patch("charm.Patroni.reload_patroni_configuration") - 
@patch("charm.PostgresqlOperatorCharm._patch_pod_labels") - @patch("charm.PostgresqlOperatorCharm._create_services") - def test_on_leader_elected(self, _, __, ___, _set_secret, _get_secret, _____, _client, ______): +POSTGRESQL_CONTAINER = "postgresql" +POSTGRESQL_SERVICE = "postgresql" +METRICS_SERVICE = "metrics_server" +PGBACKREST_SERVER_SERVICE = "pgbackrest server" + +# used for assert functions +tc = TestCase() + + +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) + harness.handle_exec("postgresql", ["locale", "-a"], result="C") + + harness.add_relation(PEER, "postgresql-k8s") + harness.begin() + yield harness + harness.cleanup() + + +def test_on_leader_elected(harness): + with ( + patch("charm.PostgresqlOperatorCharm._add_members"), + patch("charm.Client") as _client, + patch("charm.new_password", return_value="sekr1t"), + patch("charm.PostgresqlOperatorCharm.get_secret", return_value=None) as _get_secret, + patch("charm.PostgresqlOperatorCharm.set_secret") as _set_secret, + patch("charm.Patroni.reload_patroni_configuration"), + patch("charm.PostgresqlOperatorCharm._patch_pod_labels"), + patch("charm.PostgresqlOperatorCharm._create_services"), + ): + rel_id = harness.model.get_relation(PEER).id # Check that a new password was generated on leader election and nothing is done # because the "leader" key is present in the endpoint annotations due to a scale # down to zero units. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, self.charm.app.name, {"cluster_initialised": "True"} + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, harness.charm.app.name, {"cluster_initialised": "True"} ) _client.return_value.get.return_value = MagicMock( metadata=MagicMock(annotations=["leader"]) @@ -78,19 +74,17 @@ def test_on_leader_elected(self, _, __, ___, _set_secret, _get_secret, _____, _c [MagicMock(metadata=MagicMock(name="fakeName1", namespace="fakeNamespace"))], [MagicMock(metadata=MagicMock(name="fakeName2", namespace="fakeNamespace"))], ] - self.harness.set_leader() + harness.set_leader() assert _set_secret.call_count == 4 _set_secret.assert_any_call("app", "operator-password", "sekr1t") _set_secret.assert_any_call("app", "replication-password", "sekr1t") _set_secret.assert_any_call("app", "rewind-password", "sekr1t") _set_secret.assert_any_call("app", "monitoring-password", "sekr1t") _client.return_value.get.assert_called_once_with( - Endpoints, name=self._cluster_name, namespace=self.charm.model.name + Endpoints, name=f"patroni-{harness.charm.app.name}", namespace=harness.charm.model.name ) _client.return_value.patch.assert_not_called() - self.assertIn( - "cluster_initialised", self.harness.get_relation_data(self.rel_id, self.charm.app) - ) + tc.assertIn("cluster_initialised", harness.get_relation_data(rel_id, harness.charm.app)) # Trigger a new leader election and check that the password is still the same, and that the charm # fixes the missing "leader" key in the endpoint annotations. 
@@ -98,60 +92,55 @@ def test_on_leader_elected(self, _, __, ___, _set_secret, _get_secret, _____, _c _client.return_value.get.return_value = MagicMock(metadata=MagicMock(annotations=[])) _set_secret.reset_mock() _get_secret.return_value = "test" - self.harness.set_leader(False) - self.harness.set_leader() + harness.set_leader(False) + harness.set_leader() assert _set_secret.call_count == 0 _client.return_value.get.assert_called_once_with( - Endpoints, name=self._cluster_name, namespace=self.charm.model.name + Endpoints, name=f"patroni-{harness.charm.app.name}", namespace=harness.charm.model.name ) _client.return_value.patch.assert_called_once_with( Endpoints, - name=self._cluster_name, - namespace=self.charm.model.name, + name=f"patroni-{harness.charm.app.name}", + namespace=harness.charm.model.name, obj={"metadata": {"annotations": {"leader": "postgresql-k8s-0"}}}, ) - self.assertNotIn( - "cluster_initialised", self.harness.get_relation_data(self.rel_id, self.charm.app) - ) + tc.assertNotIn("cluster_initialised", harness.get_relation_data(rel_id, harness.charm.app)) # Test a failure in fixing the "leader" key in the endpoint annotations. _client.return_value.patch.side_effect = _FakeApiError - with self.assertRaises(_FakeApiError): - self.harness.set_leader(False) - self.harness.set_leader() + with tc.assertRaises(_FakeApiError): + harness.set_leader(False) + harness.set_leader() # Test no failure if the resource doesn't exist. _client.return_value.patch.side_effect = _FakeApiError(404) - self.harness.set_leader(False) - self.harness.set_leader() - - @patch("charm.PostgresqlOperatorCharm._set_active_status") - @patch("charm.Patroni.rock_postgresql_version", new_callable=PropertyMock) - @patch("charm.Patroni.primary_endpoint_ready", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgresqlOperatorCharm.postgresql") - @patch( - "charm.PostgresqlOperatorCharm._create_services", side_effect=[None, _FakeApiError, None] - ) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.Patroni.member_started") - @patch("charm.PostgresqlOperatorCharm.push_tls_files_to_workload") - @patch("charm.PostgresqlOperatorCharm._patch_pod_labels") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @patch("charm.PostgresqlOperatorCharm._create_pgdata") - def test_on_postgresql_pebble_ready( - self, - _create_pgdata, - _, - __, - _push_tls_files_to_workload, - _member_started, - _create_services, - _postgresql, - ___, - _primary_endpoint_ready, - _rock_postgresql_version, - _set_active_status, + harness.set_leader(False) + harness.set_leader() + + +@patch_network_get(private_address="1.1.1.1") +def test_on_postgresql_pebble_ready(harness): + with ( + patch("charm.PostgresqlOperatorCharm._set_active_status") as _set_active_status, + patch( + "charm.Patroni.rock_postgresql_version", new_callable=PropertyMock + ) as _rock_postgresql_version, + patch( + "charm.Patroni.primary_endpoint_ready", new_callable=PropertyMock + ) as _primary_endpoint_ready, + patch("charm.PostgresqlOperatorCharm.update_config"), + patch("charm.PostgresqlOperatorCharm.postgresql") as _postgresql, + patch( + "charm.PostgresqlOperatorCharm._create_services", + side_effect=[None, _FakeApiError, None], + ) as _create_services, + patch("charm.Patroni.member_started") as _member_started, + patch( + "charm.PostgresqlOperatorCharm.push_tls_files_to_workload" + ) as _push_tls_files_to_workload, + patch("charm.PostgresqlOperatorCharm._patch_pod_labels"), + 
patch("charm.PostgresqlOperatorCharm._on_leader_elected"), + patch("charm.PostgresqlOperatorCharm._create_pgdata") as _create_pgdata, ): _rock_postgresql_version.return_value = "14.7" @@ -159,99 +148,100 @@ def test_on_postgresql_pebble_ready( _primary_endpoint_ready.side_effect = [False, True] # Check that the initial plan is empty. - self.harness.set_can_connect(self._postgresql_container, True) - plan = self.harness.get_container_pebble_plan(self._postgresql_container) - self.assertEqual(plan.to_dict(), {}) + harness.set_can_connect(POSTGRESQL_CONTAINER, True) + plan = harness.get_container_pebble_plan(POSTGRESQL_CONTAINER) + tc.assertEqual(plan.to_dict(), {}) # Get the current and the expected layer from the pebble plan and the _postgresql_layer # method, respectively. # TODO: test also replicas (DPE-398). - self.harness.set_leader() + harness.set_leader() # Check for a Waiting status when the primary k8s endpoint is not ready yet. - self.harness.container_pebble_ready(self._postgresql_container) + harness.container_pebble_ready(POSTGRESQL_CONTAINER) _create_pgdata.assert_called_once() - self.assertTrue(isinstance(self.harness.model.unit.status, WaitingStatus)) + tc.assertTrue(isinstance(harness.model.unit.status, WaitingStatus)) _set_active_status.assert_not_called() # Check for a Blocked status when a failure happens . - self.harness.container_pebble_ready(self._postgresql_container) - self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus)) + harness.container_pebble_ready(POSTGRESQL_CONTAINER) + tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) _set_active_status.assert_not_called() # Check for the Active status. _push_tls_files_to_workload.reset_mock() - self.harness.container_pebble_ready(self._postgresql_container) - plan = self.harness.get_container_pebble_plan(self._postgresql_container) - expected = self.charm._postgresql_layer().to_dict() + harness.container_pebble_ready(POSTGRESQL_CONTAINER) + plan = harness.get_container_pebble_plan(POSTGRESQL_CONTAINER) + expected = harness.charm._postgresql_layer().to_dict() expected.pop("summary", "") expected.pop("description", "") # Check the plan is as expected. 
- self.assertEqual(plan.to_dict(), expected) + tc.assertEqual(plan.to_dict(), expected) _set_active_status.assert_called_once() - container = self.harness.model.unit.get_container(self._postgresql_container) - self.assertEqual(container.get_service(self._postgresql_service).is_running(), True) + container = harness.model.unit.get_container(POSTGRESQL_CONTAINER) + tc.assertEqual(container.get_service("postgresql").is_running(), True) _push_tls_files_to_workload.assert_called_once() - @patch("charm.Patroni.rock_postgresql_version", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm._create_pgdata") - def test_on_postgresql_pebble_ready_no_connection(self, _, _rock_postgresql_version): + +def test_on_postgresql_pebble_ready_no_connection(harness): + with ( + patch( + "charm.Patroni.rock_postgresql_version", new_callable=PropertyMock + ) as _rock_postgresql_version, + patch("charm.PostgresqlOperatorCharm._create_pgdata"), + ): mock_event = MagicMock() - mock_event.workload = self.harness.model.unit.get_container(self._postgresql_container) + mock_event.workload = harness.model.unit.get_container(POSTGRESQL_CONTAINER) _rock_postgresql_version.return_value = "14.7" - self.charm._on_postgresql_pebble_ready(mock_event) + harness.charm._on_postgresql_pebble_ready(mock_event) # Event was deferred and status is still maintenance mock_event.defer.assert_called_once() mock_event.set_results.assert_not_called() - self.assertIsInstance(self.harness.model.unit.status, MaintenanceStatus) - - @pytest.mark.usefixtures("only_without_juju_secrets") - def test_on_get_password(self): - # Create a mock event and set passwords in peer relation data. - mock_event = MagicMock(params={}) - self.harness.update_relation_data( - self.rel_id, - self.charm.app.name, - { - "operator-password": "test-password", - "replication-password": "replication-test-password", - }, - ) - - # Test providing an invalid username. - mock_event.params["username"] = "user" - self.charm._on_get_password(mock_event) - mock_event.fail.assert_called_once() - mock_event.set_results.assert_not_called() - - # Test without providing the username option. - mock_event.reset_mock() - del mock_event.params["username"] - self.charm._on_get_password(mock_event) - mock_event.set_results.assert_called_once_with({"password": "test-password"}) - - # Also test providing the username option. - mock_event.reset_mock() - mock_event.params["username"] = "replication" - self.charm._on_get_password(mock_event) - mock_event.set_results.assert_called_once_with({"password": "replication-test-password"}) + tc.assertIsInstance(harness.model.unit.status, MaintenanceStatus) + + +def test_on_get_password(harness): + # Create a mock event and set passwords in peer relation data. + mock_event = MagicMock(params={}) + rel_id = harness.model.get_relation(PEER).id + harness.update_relation_data( + rel_id, + harness.charm.app.name, + { + "operator-password": "test-password", + "replication-password": "replication-test-password", + }, + ) - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgresqlOperatorCharm.set_secret") - @patch("charm.PostgresqlOperatorCharm.postgresql") - @patch("charm.Patroni.are_all_members_ready") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - def test_on_set_password( - self, - _, - _are_all_members_ready, - _postgresql, - _set_secret, - _update_config, - _reload_patroni_configuration, + # Test providing an invalid username. 
+ mock_event.params["username"] = "user" + harness.charm._on_get_password(mock_event) + mock_event.fail.assert_called_once() + mock_event.set_results.assert_not_called() + + # Test without providing the username option. + mock_event.reset_mock() + del mock_event.params["username"] + harness.charm._on_get_password(mock_event) + mock_event.set_results.assert_called_once_with({"password": "test-password"}) + + # Also test providing the username option. + mock_event.reset_mock() + mock_event.params["username"] = "replication" + harness.charm._on_get_password(mock_event) + mock_event.set_results.assert_called_once_with({"password": "replication-test-password"}) + + +def test_on_set_password(harness): + with ( + patch("charm.Patroni.reload_patroni_configuration") as _reload_patroni_configuration, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.PostgresqlOperatorCharm.set_secret") as _set_secret, + patch("charm.PostgresqlOperatorCharm.postgresql") as _postgresql, + patch("charm.Patroni.are_all_members_ready") as _are_all_members_ready, + patch("charm.PostgresqlOperatorCharm._on_leader_elected"), ): # Create a mock event. mock_event = MagicMock(params={}) @@ -263,192 +253,195 @@ def test_on_set_password( ) # Test trying to set a password through a non leader unit. - self.charm._on_set_password(mock_event) + harness.charm._on_set_password(mock_event) mock_event.fail.assert_called_once() _set_secret.assert_not_called() # Test providing an invalid username. - self.harness.set_leader() + harness.set_leader() mock_event.reset_mock() mock_event.params["username"] = "user" - self.charm._on_set_password(mock_event) + harness.charm._on_set_password(mock_event) mock_event.fail.assert_called_once() _set_secret.assert_not_called() # Test without providing the username option but without all cluster members ready. mock_event.reset_mock() del mock_event.params["username"] - self.charm._on_set_password(mock_event) + harness.charm._on_set_password(mock_event) mock_event.fail.assert_called_once() _set_secret.assert_not_called() # Test for an error updating when updating the user password in the database. mock_event.reset_mock() - self.charm._on_set_password(mock_event) + harness.charm._on_set_password(mock_event) mock_event.fail.assert_called_once() _set_secret.assert_not_called() # Test without providing the username option. - self.charm._on_set_password(mock_event) - self.assertEqual(_set_secret.call_args_list[0][0][1], "operator-password") + harness.charm._on_set_password(mock_event) + tc.assertEqual(_set_secret.call_args_list[0][0][1], "operator-password") # Also test providing the username option. _set_secret.reset_mock() mock_event.params["username"] = "replication" - self.charm._on_set_password(mock_event) - self.assertEqual(_set_secret.call_args_list[0][0][1], "replication-password") + harness.charm._on_set_password(mock_event) + tc.assertEqual(_set_secret.call_args_list[0][0][1], "replication-password") # And test providing both the username and password options. 
_set_secret.reset_mock() mock_event.params["password"] = "replication-test-password" - self.charm._on_set_password(mock_event) + harness.charm._on_set_password(mock_event) _set_secret.assert_called_once_with( "app", "replication-password", "replication-test-password" ) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.Patroni.get_primary") - def test_on_get_primary(self, _get_primary): + +@patch_network_get(private_address="1.1.1.1") +def test_on_get_primary(harness): + with patch("charm.Patroni.get_primary") as _get_primary: mock_event = Mock() _get_primary.return_value = "postgresql-k8s-1" - self.charm._on_get_primary(mock_event) + harness.charm._on_get_primary(mock_event) _get_primary.assert_called_once() mock_event.set_results.assert_called_once_with({"primary": "postgresql-k8s-1"}) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.Patroni.get_primary") - def test_fail_to_get_primary(self, _get_primary): + +@patch_network_get(private_address="1.1.1.1") +def test_fail_to_get_primary(harness): + with patch("charm.Patroni.get_primary") as _get_primary: mock_event = Mock() _get_primary.side_effect = [RetryError("fake error")] - self.charm._on_get_primary(mock_event) + harness.charm._on_get_primary(mock_event) _get_primary.assert_called_once() mock_event.set_results.assert_not_called() - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._handle_processes_failures") - @patch("charm.Patroni.member_started") - @patch("charm.Patroni.get_primary") - @patch("ops.model.Container.pebble") - @patch("upgrade.PostgreSQLUpgrade.idle", return_value="idle") - def test_on_update_status( - self, - _, - _pebble, - _get_primary, - _member_started, - _handle_processes_failures, + +@patch_network_get(private_address="1.1.1.1") +def test_on_update_status(harness): + with ( + patch( + "charm.PostgresqlOperatorCharm._handle_processes_failures" + ) as _handle_processes_failures, + patch("charm.Patroni.member_started") as _member_started, + patch("charm.Patroni.get_primary") as _get_primary, + patch("ops.model.Container.pebble") as _pebble, + patch("upgrade.PostgreSQLUpgrade.idle", return_value="idle"), ): # Test before the PostgreSQL service is available. _pebble.get_services.return_value = [] - self.harness.set_can_connect(self._postgresql_container, True) - self.charm.on.update_status.emit() + harness.set_can_connect(POSTGRESQL_CONTAINER, True) + harness.charm.on.update_status.emit() _get_primary.assert_not_called() # Test when a failure need to be handled. _pebble.get_services.return_value = ["service data"] _handle_processes_failures.return_value = True - self.charm.on.update_status.emit() + harness.charm.on.update_status.emit() _get_primary.assert_not_called() # Check primary message not being set (current unit is not the primary). _handle_processes_failures.return_value = False _get_primary.side_effect = [ "postgresql-k8s/1", - self.charm.unit.name, + harness.charm.unit.name, ] - self.charm.on.update_status.emit() + harness.charm.on.update_status.emit() _get_primary.assert_called_once() - self.assertNotEqual( - self.harness.model.unit.status, + tc.assertNotEqual( + harness.model.unit.status, ActiveStatus("Primary"), ) # Test again and check primary message being set (current unit is the primary). 
- self.charm.on.update_status.emit() - self.assertEqual( - self.harness.model.unit.status, + harness.charm.on.update_status.emit() + tc.assertEqual( + harness.model.unit.status, ActiveStatus("Primary"), ) - @patch("charm.Patroni.get_primary") - @patch("ops.model.Container.pebble") - def test_on_update_status_no_connection(self, _pebble, _get_primary): - self.charm.on.update_status.emit() + +def test_on_update_status_no_connection(harness): + with ( + patch("charm.Patroni.get_primary") as _get_primary, + patch("ops.model.Container.pebble") as _pebble, + ): + harness.charm.on.update_status.emit() # Exits before calling anything. _pebble.get_services.assert_not_called() _get_primary.assert_not_called() - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._handle_processes_failures", return_value=False) - @patch("charm.Patroni.member_started") - @patch("charm.Patroni.get_primary") - @patch("ops.model.Container.pebble") - @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) - def test_on_update_status_with_error_on_get_primary( - self, _, _pebble, _get_primary, _member_started, _handle_processes_failures + +@patch_network_get(private_address="1.1.1.1") +def test_on_update_status_with_error_on_get_primary(harness): + with ( + patch( + "charm.PostgresqlOperatorCharm._handle_processes_failures", return_value=False + ) as _handle_processes_failures, + patch("charm.Patroni.member_started") as _member_started, + patch("charm.Patroni.get_primary") as _get_primary, + patch("ops.model.Container.pebble") as _pebble, + patch("upgrade.PostgreSQLUpgrade.idle", return_value=True), ): # Mock the access to the list of Pebble services. _pebble.get_services.return_value = ["service data"] _get_primary.side_effect = [RetryError("fake error")] - self.harness.set_can_connect(self._postgresql_container, True) + harness.set_can_connect(POSTGRESQL_CONTAINER, True) - with self.assertLogs("charm", "ERROR") as logs: - self.charm.on.update_status.emit() - self.assertIn( + with tc.assertLogs("charm", "ERROR") as logs: + harness.charm.on.update_status.emit() + tc.assertIn( "ERROR:charm:failed to get primary with error RetryError[fake error]", logs.output ) - @patch("charm.PostgresqlOperatorCharm._set_active_status") - @patch("charm.PostgresqlOperatorCharm._handle_processes_failures") - @patch("charm.PostgreSQLBackups.can_use_s3_repository") - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("ops.model.Container.pebble") - @patch("upgrade.PostgreSQLUpgrade.idle", return_value=True) - def test_on_update_status_after_restore_operation( - self, - _, - _pebble, - _member_started, - _update_config, - _can_use_s3_repository, - _handle_processes_failures, - _set_active_status, + +def test_on_update_status_after_restore_operation(harness): + with ( + patch("charm.PostgresqlOperatorCharm._set_active_status") as _set_active_status, + patch( + "charm.PostgresqlOperatorCharm._handle_processes_failures" + ) as _handle_processes_failures, + patch("charm.PostgreSQLBackups.can_use_s3_repository") as _can_use_s3_repository, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("ops.model.Container.pebble") as _pebble, + patch("upgrade.PostgreSQLUpgrade.idle", return_value=True), ): + rel_id = harness.model.get_relation(PEER).id # Mock the access to the list of Pebble services to test a failed restore. 
_pebble.get_services.return_value = [MagicMock(current=ServiceStatus.INACTIVE)] # Test when the restore operation fails. - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.harness.update_relation_data( - self.rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.set_leader() + harness.update_relation_data( + rel_id, + harness.charm.app.name, {"restoring-backup": "2023-01-01T09:00:00Z"}, ) - self.harness.set_can_connect(self._postgresql_container, True) - self.charm.on.update_status.emit() + harness.set_can_connect(POSTGRESQL_CONTAINER, True) + harness.charm.on.update_status.emit() _update_config.assert_not_called() _handle_processes_failures.assert_not_called() _set_active_status.assert_not_called() - self.assertIsInstance(self.charm.unit.status, BlockedStatus) + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) # Test when the restore operation hasn't finished yet. - self.charm.unit.status = ActiveStatus() + harness.charm.unit.status = ActiveStatus() _pebble.get_services.return_value = [MagicMock(current=ServiceStatus.ACTIVE)] _member_started.return_value = False - self.charm.on.update_status.emit() + harness.charm.on.update_status.emit() _update_config.assert_not_called() _handle_processes_failures.assert_not_called() _set_active_status.assert_not_called() - self.assertIsInstance(self.charm.unit.status, ActiveStatus) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) # Assert that the backup id is still in the application relation databag. - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.charm.app), + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.app), {"restoring-backup": "2023-01-01T09:00:00Z"}, ) @@ -456,85 +449,94 @@ def test_on_update_status_after_restore_operation( _member_started.return_value = True _can_use_s3_repository.return_value = (True, None) _handle_processes_failures.return_value = False - self.charm.on.update_status.emit() + harness.charm.on.update_status.emit() _update_config.assert_called_once() _handle_processes_failures.assert_called_once() _set_active_status.assert_called_once() - self.assertIsInstance(self.charm.unit.status, ActiveStatus) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) # Assert that the backup id is not in the application relation databag anymore. - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.charm.app), {}) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app), {}) # Test when it's not possible to use the configured S3 repository. 
_update_config.reset_mock() _handle_processes_failures.reset_mock() _set_active_status.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + harness.charm.app.name, {"restoring-backup": "2023-01-01T09:00:00Z"}, ) _can_use_s3_repository.return_value = (False, "fake validation message") - self.charm.on.update_status.emit() + harness.charm.on.update_status.emit() _update_config.assert_called_once() _handle_processes_failures.assert_not_called() _set_active_status.assert_not_called() - self.assertIsInstance(self.charm.unit.status, BlockedStatus) - self.assertEqual(self.charm.unit.status.message, "fake validation message") + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) + tc.assertEqual(harness.charm.unit.status.message, "fake validation message") # Assert that the backup id is not in the application relation databag anymore. - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.charm.app), {}) - - @patch("charms.data_platform_libs.v0.upgrade.DataUpgrade._upgrade_supported_check") - @patch("charm.PostgresqlOperatorCharm._patch_pod_labels", side_effect=[_FakeApiError, None]) - @patch( - "charm.PostgresqlOperatorCharm._create_services", side_effect=[_FakeApiError, None, None] - ) - def test_on_upgrade_charm(self, _create_services, _patch_pod_labels, _upgrade_supported_check): + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app), {}) + + +def test_on_upgrade_charm(harness): + with ( + patch( + "charms.data_platform_libs.v0.upgrade.DataUpgrade._upgrade_supported_check" + ) as _upgrade_supported_check, + patch( + "charm.PostgresqlOperatorCharm._patch_pod_labels", side_effect=[_FakeApiError, None] + ) as _patch_pod_labels, + patch( + "charm.PostgresqlOperatorCharm._create_services", + side_effect=[_FakeApiError, None, None], + ) as _create_services, + ): # Test with a problem happening when trying to create the k8s resources. - self.charm.unit.status = ActiveStatus() - self.charm.on.upgrade_charm.emit() + harness.charm.unit.status = ActiveStatus() + harness.charm.on.upgrade_charm.emit() _create_services.assert_called_once() _patch_pod_labels.assert_not_called() - self.assertTrue(isinstance(self.charm.unit.status, BlockedStatus)) + tc.assertTrue(isinstance(harness.charm.unit.status, BlockedStatus)) # Test a successful k8s resources creation, but unsuccessful pod patch operation. _create_services.reset_mock() - self.charm.unit.status = ActiveStatus() - self.charm.on.upgrade_charm.emit() + harness.charm.unit.status = ActiveStatus() + harness.charm.on.upgrade_charm.emit() _create_services.assert_called_once() _patch_pod_labels.assert_called_once() - self.assertTrue(isinstance(self.charm.unit.status, BlockedStatus)) + tc.assertTrue(isinstance(harness.charm.unit.status, BlockedStatus)) # Test a successful k8s resources creation and the operation to patch the pod. 
_create_services.reset_mock() _patch_pod_labels.reset_mock() - self.charm.unit.status = ActiveStatus() - self.charm.on.upgrade_charm.emit() + harness.charm.unit.status = ActiveStatus() + harness.charm.on.upgrade_charm.emit() _create_services.assert_called_once() _patch_pod_labels.assert_called_once() - self.assertFalse(isinstance(self.charm.unit.status, BlockedStatus)) + tc.assertFalse(isinstance(harness.charm.unit.status, BlockedStatus)) - @patch("charm.Client") - def test_create_services(self, _client): + +def test_create_services(harness): + with patch("charm.Client") as _client: # Test the successful creation of the resources. _client.return_value.get.return_value = MagicMock( metadata=MagicMock(ownerReferences="fakeOwnerReferences") ) - self.charm._create_services() + harness.charm._create_services() _client.return_value.get.assert_called_once_with( - res=Pod, name="postgresql-k8s-0", namespace=self.charm.model.name + res=Pod, name="postgresql-k8s-0", namespace=harness.charm.model.name ) - self.assertEqual(_client.return_value.apply.call_count, 2) + tc.assertEqual(_client.return_value.apply.call_count, 2) # Test when the charm fails to get first pod info. _client.reset_mock() _client.return_value.get.side_effect = _FakeApiError - with self.assertRaises(_FakeApiError): - self.charm._create_services() + with tc.assertRaises(_FakeApiError): + harness.charm._create_services() _client.return_value.get.assert_called_once_with( - res=Pod, name="postgresql-k8s-0", namespace=self.charm.model.name + res=Pod, name="postgresql-k8s-0", namespace=harness.charm.model.name ) _client.return_value.apply.assert_not_called() @@ -543,42 +545,49 @@ def test_create_services(self, _client): metadata=MagicMock(ownerReferences="fakeOwnerReferences") ) _client.return_value.apply.side_effect = [None, _FakeApiError] - with self.assertRaises(_FakeApiError): - self.charm._create_services() + with tc.assertRaises(_FakeApiError): + harness.charm._create_services() _client.return_value.get.assert_called_once_with( - res=Pod, name="postgresql-k8s-0", namespace=self.charm.model.name + res=Pod, name="postgresql-k8s-0", namespace=harness.charm.model.name ) - self.assertEqual(_client.return_value.apply.call_count, 2) + tc.assertEqual(_client.return_value.apply.call_count, 2) + - @patch("charm.Client") - def test_patch_pod_labels(self, _client): - member = self.charm._unit.replace("/", "-") +def test_patch_pod_labels(harness): + with patch("charm.Client") as _client: + member = harness.charm._unit.replace("/", "-") - self.charm._patch_pod_labels(member) + harness.charm._patch_pod_labels(member) expected_patch = { "metadata": { - "labels": {"application": "patroni", "cluster-name": f"patroni-{self.charm._name}"} + "labels": { + "application": "patroni", + "cluster-name": f"patroni-{harness.charm._name}", + } } } _client.return_value.patch.assert_called_once_with( Pod, name=member, - namespace=self.charm._namespace, + namespace=harness.charm._namespace, obj=expected_patch, ) - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.PostgresqlOperatorCharm._patch_pod_labels") - @patch("charm.PostgresqlOperatorCharm._create_services") - def test_postgresql_layer(self, _, __, ___): + +def test_postgresql_layer(harness): + with ( + patch("charm.Patroni.reload_patroni_configuration"), + patch("charm.PostgresqlOperatorCharm._patch_pod_labels"), + patch("charm.PostgresqlOperatorCharm._create_services"), + ): # Test with the already generated password. 
- self.harness.set_leader() - plan = self.charm._postgresql_layer().to_dict() + harness.set_leader() + plan = harness.charm._postgresql_layer().to_dict() expected = { "summary": "postgresql + patroni layer", "description": "pebble config layer for postgresql + patroni", "services": { - self._postgresql_service: { + POSTGRESQL_SERVICE: { "override": "replace", "summary": "entrypoint of the postgresql + patroni image", "command": "patroni /var/lib/postgresql/data/patroni.yml", @@ -586,42 +595,42 @@ def test_postgresql_layer(self, _, __, ___): "user": "postgres", "group": "postgres", "environment": { - "PATRONI_KUBERNETES_LABELS": f"{{application: patroni, cluster-name: patroni-{self.charm._name}}}", - "PATRONI_KUBERNETES_NAMESPACE": self.charm._namespace, + "PATRONI_KUBERNETES_LABELS": f"{{application: patroni, cluster-name: patroni-{harness.charm._name}}}", + "PATRONI_KUBERNETES_NAMESPACE": harness.charm._namespace, "PATRONI_KUBERNETES_USE_ENDPOINTS": "true", "PATRONI_NAME": "postgresql-k8s-0", - "PATRONI_SCOPE": f"patroni-{self.charm._name}", + "PATRONI_SCOPE": f"patroni-{harness.charm._name}", "PATRONI_REPLICATION_USERNAME": "replication", "PATRONI_SUPERUSER_USERNAME": "operator", }, }, - self._metrics_service: { + METRICS_SERVICE: { "override": "replace", "summary": "postgresql metrics exporter", "command": "/start-exporter.sh", "startup": "enabled", - "after": [self._postgresql_service], + "after": [POSTGRESQL_SERVICE], "user": "postgres", "group": "postgres", "environment": { "DATA_SOURCE_NAME": ( f"user=monitoring " - f"password={self.charm.get_secret('app', 'monitoring-password')} " + f"password={harness.charm.get_secret('app', 'monitoring-password')} " "host=/var/run/postgresql port=5432 database=postgres" ), }, }, - self.pgbackrest_server_service: { + PGBACKREST_SERVER_SERVICE: { "override": "replace", "summary": "pgBackRest server", - "command": self.pgbackrest_server_service, + "command": PGBACKREST_SERVER_SERVICE, "startup": "disabled", "user": "postgres", "group": "postgres", }, }, "checks": { - self._postgresql_service: { + POSTGRESQL_SERVICE: { "override": "replace", "level": "ready", "http": { @@ -630,23 +639,25 @@ def test_postgresql_layer(self, _, __, ___): } }, } - self.assertDictEqual(plan, expected) + tc.assertDictEqual(plan, expected) - @patch("charm.Client") - def test_on_stop(self, _client): + +def test_on_stop(harness): + with patch("charm.Client") as _client: + rel_id = harness.model.get_relation(PEER).id # Test a successful run of the hook. 
for planned_units, relation_data in { 0: {}, 1: {"some-relation-data": "some-value"}, }.items(): - self.harness.set_planned_units(planned_units) - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - self.charm.unit.name, + harness.set_planned_units(planned_units) + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + harness.charm.unit.name, {"some-relation-data": "some-value"}, ) - with self.assertNoLogs("charm", "ERROR"): + with tc.assertNoLogs("charm", "ERROR"): _client.return_value.get.return_value = MagicMock( metadata=MagicMock(ownerReferences="fakeOwnerReferences") ) @@ -654,46 +665,46 @@ def test_on_stop(self, _client): [MagicMock(metadata=MagicMock(name="fakeName1", namespace="fakeNamespace"))], [MagicMock(metadata=MagicMock(name="fakeName2", namespace="fakeNamespace"))], ] - self.charm.on.stop.emit() + harness.charm.on.stop.emit() _client.return_value.get.assert_called_once_with( - res=Pod, name="postgresql-k8s-0", namespace=self.charm.model.name + res=Pod, name="postgresql-k8s-0", namespace=harness.charm.model.name ) for kind in [Endpoints, Service]: _client.return_value.list.assert_any_call( kind, - namespace=self.charm.model.name, - labels={"app.juju.is/created-by": self.charm.app.name}, + namespace=harness.charm.model.name, + labels={"app.juju.is/created-by": harness.charm.app.name}, ) - self.assertEqual(_client.return_value.apply.call_count, 2) - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.charm.unit), relation_data + tc.assertEqual(_client.return_value.apply.call_count, 2) + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.unit), relation_data ) _client.reset_mock() # Test when the charm fails to get first pod info. _client.return_value.get.side_effect = _FakeApiError - with self.assertLogs("charm", "ERROR") as logs: - self.charm.on.stop.emit() + with tc.assertLogs("charm", "ERROR") as logs: + harness.charm.on.stop.emit() _client.return_value.get.assert_called_once_with( - res=Pod, name="postgresql-k8s-0", namespace=self.charm.model.name + res=Pod, name="postgresql-k8s-0", namespace=harness.charm.model.name ) _client.return_value.list.assert_not_called() _client.return_value.apply.assert_not_called() - self.assertIn("failed to get first pod info", "".join(logs.output)) + tc.assertIn("failed to get first pod info", "".join(logs.output)) # Test when the charm fails to get the k8s resources created by the charm and Patroni. 
_client.return_value.get.side_effect = None _client.return_value.list.side_effect = [[], _FakeApiError] - with self.assertLogs("charm", "ERROR") as logs: - self.charm.on.stop.emit() + with tc.assertLogs("charm", "ERROR") as logs: + harness.charm.on.stop.emit() for kind in [Endpoints, Service]: _client.return_value.list.assert_any_call( kind, - namespace=self.charm.model.name, - labels={"app.juju.is/created-by": self.charm.app.name}, + namespace=harness.charm.model.name, + labels={"app.juju.is/created-by": harness.charm.app.name}, ) _client.return_value.apply.assert_not_called() - self.assertIn( + tc.assertIn( "failed to get the k8s resources created by the charm and Patroni", "".join(logs.output), ) @@ -707,39 +718,43 @@ def test_on_stop(self, _client): [MagicMock(metadata=MagicMock(name="fakeName2", namespace="fakeNamespace"))], ] _client.return_value.apply.side_effect = [None, _FakeApiError] - with self.assertLogs("charm", "ERROR") as logs: - self.charm.on.stop.emit() - self.assertEqual(_client.return_value.apply.call_count, 2) - self.assertIn("failed to patch k8s MagicMock", "".join(logs.output)) - - def test_client_relations(self): - # Test when the charm has no relations. - self.assertEqual(self.charm.client_relations, []) - - # Test when the charm has some relations. - self.harness.add_relation("database", "application") - self.harness.add_relation("db", "legacy-application") - self.harness.add_relation("db-admin", "legacy-admin-application") - database_relation = self.harness.model.get_relation("database") - db_relation = self.harness.model.get_relation("db") - db_admin_relation = self.harness.model.get_relation("db-admin") - self.assertEqual( - self.charm.client_relations, [database_relation, db_relation, db_admin_relation] - ) + with tc.assertLogs("charm", "ERROR") as logs: + harness.charm.on.stop.emit() + tc.assertEqual(_client.return_value.apply.call_count, 2) + tc.assertIn("failed to patch k8s MagicMock", "".join(logs.output)) + + +def test_client_relations(harness): + # Test when the charm has no relations. + tc.assertEqual(harness.charm.client_relations, []) + + # Test when the charm has some relations. 
+ harness.add_relation("database", "application") + harness.add_relation("db", "legacy-application") + harness.add_relation("db-admin", "legacy-admin-application") + database_relation = harness.model.get_relation("database") + db_relation = harness.model.get_relation("db") + db_admin_relation = harness.model.get_relation("db-admin") + tc.assertEqual( + harness.charm.client_relations, [database_relation, db_relation, db_admin_relation] + ) + - @patch("charm.PostgresqlOperatorCharm.postgresql", new_callable=PropertyMock) - def test_validate_config_options(self, _charm_lib): - self.harness.set_can_connect(self._postgresql_container, True) +def test_validate_config_options(harness): + with patch( + "charm.PostgresqlOperatorCharm.postgresql", new_callable=PropertyMock + ) as _charm_lib: + harness.set_can_connect(POSTGRESQL_CONTAINER, True) _charm_lib.return_value.get_postgresql_text_search_configs.return_value = [] _charm_lib.return_value.validate_date_style.return_value = [] _charm_lib.return_value.get_postgresql_timezones.return_value = [] # Test instance_default_text_search_config exception - with self.harness.hooks_disabled(): - self.harness.update_config({"instance_default_text_search_config": "pg_catalog.test"}) + with harness.hooks_disabled(): + harness.update_config({"instance_default_text_search_config": "pg_catalog.test"}) - with self.assertRaises(ValueError) as e: - self.charm._validate_config_options() + with tc.assertRaises(ValueError) as e: + harness.charm._validate_config_options() assert ( e.msg == "instance_default_text_search_config config option has an invalid value" ) @@ -750,22 +765,22 @@ def test_validate_config_options(self, _charm_lib): ] # Test request_date_style exception - with self.harness.hooks_disabled(): - self.harness.update_config({"request_date_style": "ISO, TEST"}) + with harness.hooks_disabled(): + harness.update_config({"request_date_style": "ISO, TEST"}) - with self.assertRaises(ValueError) as e: - self.charm._validate_config_options() + with tc.assertRaises(ValueError) as e: + harness.charm._validate_config_options() assert e.msg == "request_date_style config option has an invalid value" _charm_lib.return_value.validate_date_style.assert_called_once_with("ISO, TEST") _charm_lib.return_value.validate_date_style.return_value = ["ISO, TEST"] # Test request_time_zone exception - with self.harness.hooks_disabled(): - self.harness.update_config({"request_time_zone": "TEST_ZONE"}) + with harness.hooks_disabled(): + harness.update_config({"request_time_zone": "TEST_ZONE"}) - with self.assertRaises(ValueError) as e: - self.charm._validate_config_options() + with tc.assertRaises(ValueError) as e: + harness.charm._validate_config_options() assert e.msg == "request_time_zone config option has an invalid value" _charm_lib.return_value.get_postgresql_timezones.assert_called_once_with() @@ -775,298 +790,282 @@ def test_validate_config_options(self, _charm_lib): # Secrets # - def test_scope_obj(self): - assert self.charm._scope_obj("app") == self.charm.framework.model.app - assert self.charm._scope_obj("unit") == self.charm.framework.model.unit - assert self.charm._scope_obj("test") is None - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - def test_get_secret(self, _): +def test_scope_obj(harness): + assert harness.charm._scope_obj("app") == harness.charm.framework.model.app + assert harness.charm._scope_obj("unit") == harness.charm.framework.model.unit + assert harness.charm._scope_obj("test") is None + + 
+@patch_network_get(private_address="1.1.1.1") +def test_get_secret(harness): + with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): + rel_id = harness.model.get_relation(PEER).id # App level changes require leader privileges - self.harness.set_leader() + harness.set_leader() # Test application scope. - assert self.charm.get_secret("app", "password") is None - self.harness.update_relation_data( - self.rel_id, self.charm.app.name, {"password": "test-password"} - ) - assert self.charm.get_secret("app", "password") == "test-password" + assert harness.charm.get_secret("app", "password") is None + harness.update_relation_data(rel_id, harness.charm.app.name, {"password": "test-password"}) + assert harness.charm.get_secret("app", "password") == "test-password" # Unit level changes don't require leader privileges - self.harness.set_leader(False) + harness.set_leader(False) # Test unit scope. - assert self.charm.get_secret("unit", "password") is None - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"password": "test-password"} + assert harness.charm.get_secret("unit", "password") is None + harness.update_relation_data( + rel_id, harness.charm.unit.name, {"password": "test-password"} ) - assert self.charm.get_secret("unit", "password") == "test-password" + assert harness.charm.get_secret("unit", "password") == "test-password" - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True) - def test_on_get_password_secrets(self, mock1, mock2): + +@patch_network_get(private_address="1.1.1.1") +def test_on_get_password_secrets(harness): + with ( + patch("charm.PostgresqlOperatorCharm._on_leader_elected"), + ): # Create a mock event and set passwords in peer relation data. - self.harness.set_leader() + harness.set_leader() mock_event = MagicMock(params={}) - self.harness.charm.set_secret("app", "operator-password", "test-password") - self.harness.charm.set_secret("app", "replication-password", "replication-test-password") + harness.charm.set_secret("app", "operator-password", "test-password") + harness.charm.set_secret("app", "replication-password", "replication-test-password") # Test providing an invalid username. mock_event.params["username"] = "user" - self.charm._on_get_password(mock_event) + harness.charm._on_get_password(mock_event) mock_event.fail.assert_called_once() mock_event.set_results.assert_not_called() # Test without providing the username option. mock_event.reset_mock() del mock_event.params["username"] - self.charm._on_get_password(mock_event) + harness.charm._on_get_password(mock_event) mock_event.set_results.assert_called_once_with({"password": "test-password"}) # Also test providing the username option. 
mock_event.reset_mock() mock_event.params["username"] = "replication" - self.charm._on_get_password(mock_event) + harness.charm._on_get_password(mock_event) mock_event.set_results.assert_called_once_with({"password": "replication-test-password"}) - @parameterized.expand([("app"), ("unit")]) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True) - def test_get_secret_secrets(self, scope, _, __): - self.harness.set_leader() - assert self.charm.get_secret(scope, "operator-password") is None - self.charm.set_secret(scope, "operator-password", "test-password") - assert self.charm.get_secret(scope, "operator-password") == "test-password" +@pytest.mark.parametrize("scope", [("app"), ("unit")]) +@patch_network_get(private_address="1.1.1.1") +def test_get_secret_secrets(harness, scope): + with ( + patch("charm.PostgresqlOperatorCharm._on_leader_elected"), + ): + harness.set_leader() + + assert harness.charm.get_secret(scope, "operator-password") is None + harness.charm.set_secret(scope, "operator-password", "test-password") + assert harness.charm.get_secret(scope, "operator-password") == "test-password" + - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - def test_set_secret(self, _): - self.harness.set_leader() +@patch_network_get(private_address="1.1.1.1") +def test_set_secret(harness): + with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): + rel_id = harness.model.get_relation(PEER).id + harness.set_leader() # Test application scope. - assert "password" not in self.harness.get_relation_data(self.rel_id, self.charm.app.name) - self.charm.set_secret("app", "password", "test-password") + assert "password" not in harness.get_relation_data(rel_id, harness.charm.app.name) + harness.charm.set_secret("app", "password", "test-password") assert ( - self.harness.get_relation_data(self.rel_id, self.charm.app.name)["password"] + harness.get_relation_data(rel_id, harness.charm.app.name)["password"] == "test-password" ) - self.charm.set_secret("app", "password", None) - assert "password" not in self.harness.get_relation_data(self.rel_id, self.charm.app.name) + harness.charm.set_secret("app", "password", None) + assert "password" not in harness.get_relation_data(rel_id, harness.charm.app.name) # Test unit scope. 
- assert "password" not in self.harness.get_relation_data(self.rel_id, self.charm.unit.name) - self.charm.set_secret("unit", "password", "test-password") + assert "password" not in harness.get_relation_data(rel_id, harness.charm.unit.name) + harness.charm.set_secret("unit", "password", "test-password") assert ( - self.harness.get_relation_data(self.rel_id, self.charm.unit.name)["password"] + harness.get_relation_data(rel_id, harness.charm.unit.name)["password"] == "test-password" ) - self.charm.set_secret("unit", "password", None) - assert "password" not in self.harness.get_relation_data(self.rel_id, self.charm.unit.name) - - with self.assertRaises(RuntimeError): - self.charm.set_secret("test", "password", "test") - - @parameterized.expand([("app", True), ("unit", True), ("unit", False)]) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True) - def test_set_reset_new_secret(self, scope, is_leader, _, __): - """NOTE: currently ops.testing seems to allow for non-leader to set secrets too!""" + harness.charm.set_secret("unit", "password", None) + assert "password" not in harness.get_relation_data(rel_id, harness.charm.unit.name) + + with tc.assertRaises(RuntimeError): + harness.charm.set_secret("test", "password", "test") + + +@pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)]) +@patch_network_get(private_address="1.1.1.1") +def test_set_reset_new_secret(harness, scope, is_leader): + """NOTE: currently ops.testing seems to allow for non-leader to set secrets too!""" + with ( + patch("charm.PostgresqlOperatorCharm._on_leader_elected"), + ): # App has to be leader, unit can be either - self.harness.set_leader(is_leader) + harness.set_leader(is_leader) # Getting current password - self.harness.charm.set_secret(scope, "new-secret", "bla") - assert self.harness.charm.get_secret(scope, "new-secret") == "bla" + harness.charm.set_secret(scope, "new-secret", "bla") + assert harness.charm.get_secret(scope, "new-secret") == "bla" # Reset new secret - self.harness.charm.set_secret(scope, "new-secret", "blablabla") - assert self.harness.charm.get_secret(scope, "new-secret") == "blablabla" + harness.charm.set_secret(scope, "new-secret", "blablabla") + assert harness.charm.get_secret(scope, "new-secret") == "blablabla" # Set another new secret - self.harness.charm.set_secret(scope, "new-secret2", "blablabla") - assert self.harness.charm.get_secret(scope, "new-secret2") == "blablabla" - - @parameterized.expand([("app", True), ("unit", True), ("unit", False)]) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True) - def test_invalid_secret(self, scope, is_leader, _, __): - # App has to be leader, unit can be either - self.harness.set_leader(is_leader) - - with self.assertRaises(RelationDataTypeError): - self.harness.charm.set_secret(scope, "somekey", 1) - - self.harness.charm.set_secret(scope, "somekey", "") - assert self.harness.charm.get_secret(scope, "somekey") is None - - @pytest.mark.usefixtures("use_caplog") - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - def test_delete_password(self, _): - """NOTE: currently ops.testing seems to allow for non-leader to remove secrets too!""" - self.harness.set_leader(True) - 
self.harness.update_relation_data( - self.rel_id, self.charm.app.name, {"replication": "somepw"} - ) - self.harness.charm.remove_secret("app", "replication") - assert self.harness.charm.get_secret("app", "replication") is None + harness.charm.set_secret(scope, "new-secret2", "blablabla") + assert harness.charm.get_secret(scope, "new-secret2") == "blablabla" - self.harness.set_leader(False) - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"somekey": "somevalue"} - ) - self.harness.charm.remove_secret("unit", "somekey") - assert self.harness.charm.get_secret("unit", "somekey") is None - self.harness.set_leader(True) - with self._caplog.at_level(logging.ERROR): - self.harness.charm.remove_secret("app", "replication") - assert ( - "Non-existing field 'replication' was attempted to be removed" in self._caplog.text - ) +@pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)]) +@patch_network_get(private_address="1.1.1.1") +def test_invalid_secret(harness, scope, is_leader): + with ( + patch("charm.PostgresqlOperatorCharm._on_leader_elected"), + ): + # App has to be leader, unit can be either + harness.set_leader(is_leader) - self.harness.charm.remove_secret("unit", "somekey") - assert "Non-existing field 'somekey' was attempted to be removed" in self._caplog.text + with tc.assertRaises(RelationDataTypeError): + harness.charm.set_secret(scope, "somekey", 1) - self.harness.charm.remove_secret("app", "non-existing-secret") - assert ( - "Non-existing field 'non-existing-secret' was attempted to be removed" - in self._caplog.text - ) + harness.charm.set_secret(scope, "somekey", "") + assert harness.charm.get_secret(scope, "somekey") is None - self.harness.charm.remove_secret("unit", "non-existing-secret") - assert ( - "Non-existing field 'non-existing-secret' was attempted to be removed" - in self._caplog.text - ) - @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm._on_leader_elected") - @pytest.mark.usefixtures("use_caplog") - def test_delete_existing_password_secrets(self, _, __): - """NOTE: currently ops.testing seems to allow for non-leader to remove secrets too!""" - self.harness.set_leader(True) - self.harness.charm.set_secret("app", "operator-password", "somepw") - self.harness.charm.remove_secret("app", "operator-password") - assert self.harness.charm.get_secret("app", "operator-password") is None - - self.harness.set_leader(False) - self.harness.charm.set_secret("unit", "operator-password", "somesecret") - self.harness.charm.remove_secret("unit", "operator-password") - assert self.harness.charm.get_secret("unit", "operator-password") is None - - self.harness.set_leader(True) - with self._caplog.at_level(logging.ERROR): - self.harness.charm.remove_secret("app", "operator-password") - assert ( - "Non-existing secret operator-password was attempted to be removed." 
-                in self._caplog.text
-            )
+@patch_network_get(private_address="1.1.1.1")
+def test_delete_password(harness, juju_has_secrets, caplog):
+    """NOTE: currently ops.testing seems to allow for non-leader to remove secrets too!"""
+    with (
+        patch("charm.PostgresqlOperatorCharm._on_leader_elected"),
+    ):
+        harness.set_leader(True)
+        harness.charm.set_secret("app", "operator-password", "somepw")
+        harness.charm.remove_secret("app", "operator-password")
+        assert harness.charm.get_secret("app", "operator-password") is None
+
+        harness.set_leader(False)
+        harness.charm.set_secret("unit", "operator-password", "somesecret")
+        harness.charm.remove_secret("unit", "operator-password")
+        assert harness.charm.get_secret("unit", "operator-password") is None
+
+        harness.set_leader(True)
+        with caplog.at_level(logging.ERROR):
+            if juju_has_secrets:
+                error_message = (
+                    "Non-existing secret operator-password was attempted to be removed."
+                )
+            else:
+                error_message = (
+                    "Non-existing field 'operator-password' was attempted to be removed"
+                )
 
-            self.harness.charm.remove_secret("unit", "operator-password")
-            assert (
-                "Non-existing secret operator-password was attempted to be removed."
-                in self._caplog.text
-            )
+            harness.charm.remove_secret("app", "operator-password")
+            assert error_message in caplog.text
+
+            harness.charm.remove_secret("unit", "operator-password")
+            assert error_message in caplog.text
 
-            self.harness.charm.remove_secret("app", "non-existing-secret")
+            harness.charm.remove_secret("app", "non-existing-secret")
             assert (
                 "Non-existing field 'non-existing-secret' was attempted to be removed"
-                in self._caplog.text
+                in caplog.text
             )
 
-            self.harness.charm.remove_secret("unit", "non-existing-secret")
+            harness.charm.remove_secret("unit", "non-existing-secret")
             assert (
                 "Non-existing field 'non-existing-secret' was attempted to be removed"
-                in self._caplog.text
+                in caplog.text
             )
 
-    @parameterized.expand([("app", True), ("unit", True), ("unit", False)])
-    @patch_network_get(private_address="1.1.1.1")
-    @patch("charm.PostgresqlOperatorCharm._on_leader_elected")
-    @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True)
-    def test_migration_from_databag(self, scope, is_leader, _, __):
-        """Check if we're moving on to use secrets when live upgrade from databag to Secrets usage."""
+
+@pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)])
+@patch_network_get(private_address="1.1.1.1")
+def test_migration_from_databag(harness, juju_has_secrets, scope, is_leader):
+    """Check that we move on to using secrets during a live upgrade from databag to secrets."""
+    with (
+        patch("charm.PostgresqlOperatorCharm._on_leader_elected"),
+    ):
+        # As this test checks the migration from databag to secrets,
+        # it only applies when secrets are enabled.
+        if not juju_has_secrets:
+            return
+
+        rel_id = harness.model.get_relation(PEER).id
         # App has to be leader, unit can be either
-        self.harness.set_leader(is_leader)
+        harness.set_leader(is_leader)
 
         # Getting current password
-        entity = getattr(self.charm, scope)
-        self.harness.update_relation_data(self.rel_id, entity.name, {"operator-password": "bla"})
-        assert self.harness.charm.get_secret(scope, "operator-password") == "bla"
+        entity = getattr(harness.charm, scope)
+        harness.update_relation_data(rel_id, entity.name, {"operator-password": "bla"})
+        assert harness.charm.get_secret(scope, "operator-password") == "bla"
 
         # Reset new secret
-        self.harness.charm.set_secret(scope, "operator-password", "blablabla")
-        assert self.harness.charm.model.get_secret(label=f"postgresql-k8s.{scope}")
-        assert self.harness.charm.get_secret(scope, "operator-password") == "blablabla"
-        assert "operator-password" not in self.harness.get_relation_data(
-            self.rel_id, getattr(self.charm, scope).name
+        harness.charm.set_secret(scope, "operator-password", "blablabla")
+        assert harness.charm.model.get_secret(label=f"postgresql-k8s.{scope}")
+        assert harness.charm.get_secret(scope, "operator-password") == "blablabla"
+        assert "operator-password" not in harness.get_relation_data(
+            rel_id, getattr(harness.charm, scope).name
         )
 
-    @parameterized.expand([("app", True), ("unit", True), ("unit", False)])
-    @patch_network_get(private_address="1.1.1.1")
-    @patch("charm.PostgresqlOperatorCharm._on_leader_elected")
-    @patch("charm.JujuVersion.has_secrets", new_callable=PropertyMock, return_value=True)
-    def test_migration_from_single_secret(self, scope, is_leader, _, __):
-        """Check if we're moving on to use secrets when live upgrade from databag to Secrets usage."""
+
+@pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)])
+@patch_network_get(private_address="1.1.1.1")
+def test_migration_from_single_secret(harness, juju_has_secrets, scope, is_leader):
+    """Check that we move on to using secrets during a live upgrade from databag to secrets."""
+    with (
+        patch("charm.PostgresqlOperatorCharm._on_leader_elected"),
+    ):
+        # As this test checks the migration from databag to secrets,
+        # it only applies when secrets are enabled.
+        if not juju_has_secrets:
+            return
+
+        rel_id = harness.model.get_relation(PEER).id
+
         # App has to be leader, unit can be either
-        self.harness.set_leader(is_leader)
+        harness.set_leader(is_leader)
 
-        secret = self.harness.charm.app.add_secret({"operator-password": "bla"})
+        secret = harness.charm.app.add_secret({"operator-password": "bla"})
 
         # Getting current password
-        entity = getattr(self.charm, scope)
-        self.harness.update_relation_data(
-            self.rel_id, entity.name, {SECRET_INTERNAL_LABEL: secret.id}
-        )
-        assert self.harness.charm.get_secret(scope, "operator-password") == "bla"
+        entity = getattr(harness.charm, scope)
+        harness.update_relation_data(rel_id, entity.name, {SECRET_INTERNAL_LABEL: secret.id})
+        assert harness.charm.get_secret(scope, "operator-password") == "bla"
 
         # Reset new secret
         # Only the leader can set app secret content.
- with self.harness.hooks_disabled(): - self.harness.set_leader(True) - self.harness.charm.set_secret(scope, "operator-password", "blablabla") - with self.harness.hooks_disabled(): - self.harness.set_leader(is_leader) - assert self.harness.charm.model.get_secret(label=f"postgresql-k8s.{scope}") - assert self.harness.charm.get_secret(scope, "operator-password") == "blablabla" - assert SECRET_INTERNAL_LABEL not in self.harness.get_relation_data( - self.rel_id, getattr(self.charm, scope).name + with harness.hooks_disabled(): + harness.set_leader(True) + harness.charm.set_secret(scope, "operator-password", "blablabla") + with harness.hooks_disabled(): + harness.set_leader(is_leader) + assert harness.charm.model.get_secret(label=f"postgresql-k8s.{scope}") + assert harness.charm.get_secret(scope, "operator-password") == "blablabla" + assert SECRET_INTERNAL_LABEL not in harness.get_relation_data( + rel_id, getattr(harness.charm, scope).name ) - @patch("charm.PostgresqlOperatorCharm._set_active_status") - @patch("backups.PostgreSQLBackups.start_stop_pgbackrest_service") - @patch("backups.PostgreSQLBackups.check_stanza") - @patch("backups.PostgreSQLBackups.coordinate_stanza_fields") - @patch("charm.Patroni.reinitialize_postgresql") - @patch("charm.Patroni.member_replication_lag", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.is_primary") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.PostgresqlOperatorCharm._add_members") - @patch("ops.framework.EventBase.defer") - def test_on_peer_relation_changed( - self, - _defer, - _add_members, - _update_config, - _member_started, - _is_primary, - _member_replication_lag, - _reinitialize_postgresql, - _coordinate_stanza_fields, - _check_stanza, - _start_stop_pgbackrest_service, - _set_active_status, + +def test_on_peer_relation_changed(harness): + with ( + patch("charm.PostgresqlOperatorCharm._set_active_status") as _set_active_status, + patch( + "backups.PostgreSQLBackups.start_stop_pgbackrest_service" + ) as _start_stop_pgbackrest_service, + patch("backups.PostgreSQLBackups.check_stanza") as _check_stanza, + patch("backups.PostgreSQLBackups.coordinate_stanza_fields") as _coordinate_stanza_fields, + patch("charm.Patroni.reinitialize_postgresql") as _reinitialize_postgresql, + patch( + "charm.Patroni.member_replication_lag", new_callable=PropertyMock + ) as _member_replication_lag, + patch("charm.PostgresqlOperatorCharm.is_primary") as _is_primary, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.PostgresqlOperatorCharm._add_members") as _add_members, + patch("ops.framework.EventBase.defer") as _defer, ): + rel_id = harness.model.get_relation(PEER).id # Test when the cluster was not initialised yet. 
- self.harness.set_can_connect(self._postgresql_container, True) - self.relation = self.harness.model.get_relation(self._peer_relation, self.rel_id) - self.charm.on.database_peers_relation_changed.emit(self.relation) + harness.set_can_connect(POSTGRESQL_CONTAINER, True) + relation = harness.model.get_relation(PEER, rel_id) + harness.charm.on.database_peers_relation_changed.emit(relation) _defer.assert_called_once() _add_members.assert_not_called() _update_config.assert_not_called() @@ -1077,13 +1076,13 @@ def test_on_peer_relation_changed( # Test when the cluster has already initialised, but the unit is not the leader and is not # part of the cluster yet. _defer.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - self.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) - self.charm.on.database_peers_relation_changed.emit(self.relation) + harness.charm.on.database_peers_relation_changed.emit(relation) _defer.assert_not_called() _add_members.assert_not_called() _update_config.assert_not_called() @@ -1092,9 +1091,9 @@ def test_on_peer_relation_changed( _start_stop_pgbackrest_service.assert_not_called() # Test when the unit is the leader. - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.charm.on.database_peers_relation_changed.emit(self.relation) + with harness.hooks_disabled(): + harness.set_leader() + harness.charm.on.database_peers_relation_changed.emit(relation) _defer.assert_not_called() _add_members.assert_called_once() _update_config.assert_not_called() @@ -1104,19 +1103,19 @@ def test_on_peer_relation_changed( # Test when the unit is part of the cluster but the container # is not ready yet. - self.harness.set_can_connect(self._postgresql_container, False) - with self.harness.hooks_disabled(): - unit_id = self.charm.unit.name.split("/")[1] - self.harness.update_relation_data( - self.rel_id, - self.charm.app.name, + harness.set_can_connect(POSTGRESQL_CONTAINER, False) + with harness.hooks_disabled(): + unit_id = harness.charm.unit.name.split("/")[1] + harness.update_relation_data( + rel_id, + harness.charm.app.name, { "endpoints": json.dumps([ - f"{self.charm.app.name}-{unit_id}.{self.charm.app.name}-endpoints" + f"{harness.charm.app.name}-{unit_id}.{harness.charm.app.name}-endpoints" ]) }, ) - self.charm.on.database_peers_relation_changed.emit(self.relation) + harness.charm.on.database_peers_relation_changed.emit(relation) _defer.assert_not_called() _update_config.assert_not_called() _coordinate_stanza_fields.assert_not_called() @@ -1124,9 +1123,9 @@ def test_on_peer_relation_changed( _start_stop_pgbackrest_service.assert_not_called() # Test when the container is ready but Patroni hasn't started yet. 
- self.harness.set_can_connect(self._postgresql_container, True) + harness.set_can_connect(POSTGRESQL_CONTAINER, True) _member_started.return_value = False - self.charm.on.database_peers_relation_changed.emit(self.relation) + harness.charm.on.database_peers_relation_changed.emit(relation) _defer.assert_called_once() _update_config.assert_called_once() _coordinate_stanza_fields.assert_not_called() @@ -1144,8 +1143,8 @@ def test_on_peer_relation_changed( _start_stop_pgbackrest_service.reset_mock() _is_primary.return_value = values[0] _member_replication_lag.return_value = values[1] - self.charm.unit.status = ActiveStatus() - self.charm.on.database_peers_relation_changed.emit(self.relation) + harness.charm.unit.status = ActiveStatus() + harness.charm.on.database_peers_relation_changed.emit(relation) if _is_primary.return_value == values[0] or int(values[1]) <= 1000: _defer.assert_not_called() _coordinate_stanza_fields.assert_called_once() @@ -1166,27 +1165,25 @@ def test_on_peer_relation_changed( _is_primary.return_value = True _member_replication_lag.return_value = "0" _start_stop_pgbackrest_service.return_value = False - self.charm.unit.status = MaintenanceStatus() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"start-tls-server": ""} - ) - self.charm.on.database_peers_relation_changed.emit(self.relation) - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.charm.unit), + harness.charm.unit.status = MaintenanceStatus() + with harness.hooks_disabled(): + harness.update_relation_data(rel_id, harness.charm.unit.name, {"start-tls-server": ""}) + harness.charm.on.database_peers_relation_changed.emit(relation) + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.unit), {"start-tls-server": "True"}, ) _defer.assert_called_once() - self.assertIsInstance(self.charm.unit.status, MaintenanceStatus) + tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) _set_active_status.assert_not_called() # Test the status being changed when it was possible to start the # pgBackRest service. _defer.reset_mock() _start_stop_pgbackrest_service.return_value = True - self.charm.on.database_peers_relation_changed.emit(self.relation) - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.charm.unit), + harness.charm.on.database_peers_relation_changed.emit(relation) + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.unit), {}, ) _defer.assert_not_called() @@ -1194,28 +1191,27 @@ def test_on_peer_relation_changed( # Test that a blocked status is not overridden. 
_set_active_status.reset_mock() - self.charm.unit.status = BlockedStatus() - self.charm.on.database_peers_relation_changed.emit(self.relation) - self.assertIsInstance(self.charm.unit.status, BlockedStatus) + harness.charm.unit.status = BlockedStatus() + harness.charm.on.database_peers_relation_changed.emit(relation) + tc.assertIsInstance(harness.charm.unit.status, BlockedStatus) _set_active_status.assert_not_called() - @patch("charm.Patroni.reinitialize_postgresql") - @patch("charm.Patroni.member_streaming", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock) - @patch("charm.Patroni.is_database_running", new_callable=PropertyMock) - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("ops.model.Container.restart") - def test_handle_processes_failures( - self, - _restart, - _member_started, - _is_database_running, - _is_primary, - _member_streaming, - _reinitialize_postgresql, + +def test_handle_processes_failures(harness): + with ( + patch("charm.Patroni.reinitialize_postgresql") as _reinitialize_postgresql, + patch("charm.Patroni.member_streaming", new_callable=PropertyMock) as _member_streaming, + patch( + "charm.PostgresqlOperatorCharm.is_primary", new_callable=PropertyMock + ) as _is_primary, + patch( + "charm.Patroni.is_database_running", new_callable=PropertyMock + ) as _is_database_running, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("ops.model.Container.restart") as _restart, ): # Test when there are no processes failures to handle. - self.harness.set_can_connect(self._postgresql_container, True) + harness.set_can_connect(POSTGRESQL_CONTAINER, True) for values in itertools.product( [True, False], [True, False], [True, False], [True, False], [True, False] ): @@ -1227,7 +1223,7 @@ def test_handle_processes_failures( _is_database_running.return_value = values[2] _is_primary.return_value = values[3] _member_streaming.return_value = values[4] - self.assertFalse(self.charm._handle_processes_failures()) + tc.assertFalse(harness.charm._handle_processes_failures()) _restart.assert_not_called() _reinitialize_postgresql.assert_not_called() @@ -1260,10 +1256,10 @@ def test_handle_processes_failures( _is_primary.return_value = values[1] _member_started.side_effect = [False, values[2]] _member_streaming.return_value = values[3] - self.charm.unit.status = ActiveStatus() - result = self.charm._handle_processes_failures() - self.assertTrue(result) if values[0] is None else self.assertFalse(result) - self.assertIsInstance(self.charm.unit.status, ActiveStatus) + harness.charm.unit.status = ActiveStatus() + result = harness.charm._handle_processes_failures() + tc.assertTrue(result) if values[0] is None else tc.assertFalse(result) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) _restart.assert_called_once_with("postgresql") _reinitialize_postgresql.assert_not_called() @@ -1282,199 +1278,194 @@ def test_handle_processes_failures( _reinitialize_postgresql.side_effect = values[0] _member_started.side_effect = [values[1], True] _is_database_running.return_value = values[2] - self.charm.unit.status = ActiveStatus() - result = self.charm._handle_processes_failures() - self.assertTrue(result) if values[0] is None else self.assertFalse(result) - self.assertIsInstance( - self.charm.unit.status, MaintenanceStatus if values[0] is None else ActiveStatus + harness.charm.unit.status = ActiveStatus() + result = harness.charm._handle_processes_failures() + 
tc.assertTrue(result) if values[0] is None else tc.assertFalse(result) + tc.assertIsInstance( + harness.charm.unit.status, MaintenanceStatus if values[0] is None else ActiveStatus ) _restart.assert_not_called() _reinitialize_postgresql.assert_called_once() - @patch("ops.model.Container.get_plan") - @patch("charm.PostgresqlOperatorCharm._handle_postgresql_restart_need") - @patch("charm.Patroni.bulk_update_parameters_controller_by_patroni") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.PostgresqlOperatorCharm._is_workload_running", new_callable=PropertyMock) - @patch("charm.Patroni.render_patroni_yml_file") - @patch("charm.PostgreSQLUpgrade") - @patch("charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock) - def test_update_config( - self, - _is_tls_enabled, - _upgrade, - _render_patroni_yml_file, - _is_workload_running, - _member_started, - _, - _handle_postgresql_restart_need, - _get_plan, + +def test_update_config(harness): + with ( + patch("ops.model.Container.get_plan") as _get_plan, + patch( + "charm.PostgresqlOperatorCharm._handle_postgresql_restart_need" + ) as _handle_postgresql_restart_need, + patch("charm.Patroni.bulk_update_parameters_controller_by_patroni"), + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch( + "charm.PostgresqlOperatorCharm._is_workload_running", new_callable=PropertyMock + ) as _is_workload_running, + patch("charm.Patroni.render_patroni_yml_file") as _render_patroni_yml_file, + patch("charm.PostgreSQLUpgrade") as _upgrade, + patch( + "charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock + ) as _is_tls_enabled, + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, ): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Mock some properties. - self.harness.set_can_connect(self._postgresql_container, True) - self.upgrade_relation = self.harness.add_relation("upgrade", self.charm.app.name) - postgresql_mock.is_tls_enabled = PropertyMock(side_effect=[False, False, False, False]) - _is_workload_running.side_effect = [False, False, True, True, False, True] - _member_started.side_effect = [True, True, False] - postgresql_mock.build_postgresql_parameters.return_value = {"test": "test"} - - # Test when only one of the two config options for profile limit memory is set. - self.harness.update_config({"profile-limit-memory": 1000}) - self.charm.update_config() - - # Test when only one of the two config options for profile limit memory is set. - self.harness.update_config( - {"profile_limit_memory": 1000}, unset={"profile-limit-memory"} - ) - self.charm.update_config() - - # Test when the two config options for profile limit memory are set at the same time. - _render_patroni_yml_file.reset_mock() - self.harness.update_config({"profile-limit-memory": 1000}) - with self.assertRaises(ValueError): - self.charm.update_config() - - # Test without TLS files available. 
- self.harness.update_config(unset={"profile-limit-memory", "profile_limit_memory"}) - with self.harness.hooks_disabled(): - self.harness.update_relation_data(self.rel_id, self.charm.unit.name, {"tls": ""}) - _is_tls_enabled.return_value = False - self.charm.update_config() - _render_patroni_yml_file.assert_called_once_with( - connectivity=True, - is_creating_backup=False, - enable_tls=False, - is_no_sync_member=False, - backup_id=None, - stanza=None, - restore_stanza=None, - parameters={"test": "test"}, - ) - _handle_postgresql_restart_need.assert_called_once() - self.assertNotIn( - "tls", self.harness.get_relation_data(self.rel_id, self.charm.unit.name) - ) + rel_id = harness.model.get_relation(PEER).id + # Mock some properties. + harness.set_can_connect(POSTGRESQL_CONTAINER, True) + harness.add_relation("upgrade", harness.charm.app.name) + postgresql_mock.is_tls_enabled = PropertyMock(side_effect=[False, False, False, False]) + _is_workload_running.side_effect = [False, False, True, True, False, True] + _member_started.side_effect = [True, True, False] + postgresql_mock.build_postgresql_parameters.return_value = {"test": "test"} + + # Test when only one of the two config options for profile limit memory is set. + harness.update_config({"profile-limit-memory": 1000}) + harness.charm.update_config() + + # Test when only one of the two config options for profile limit memory is set. + harness.update_config({"profile_limit_memory": 1000}, unset={"profile-limit-memory"}) + harness.charm.update_config() + + # Test when the two config options for profile limit memory are set at the same time. + _render_patroni_yml_file.reset_mock() + harness.update_config({"profile-limit-memory": 1000}) + with tc.assertRaises(ValueError): + harness.charm.update_config() + + # Test without TLS files available. + harness.update_config(unset={"profile-limit-memory", "profile_limit_memory"}) + with harness.hooks_disabled(): + harness.update_relation_data(rel_id, harness.charm.unit.name, {"tls": ""}) + _is_tls_enabled.return_value = False + harness.charm.update_config() + _render_patroni_yml_file.assert_called_once_with( + connectivity=True, + is_creating_backup=False, + enable_tls=False, + is_no_sync_member=False, + backup_id=None, + stanza=None, + restore_stanza=None, + parameters={"test": "test"}, + ) + _handle_postgresql_restart_need.assert_called_once() + tc.assertNotIn("tls", harness.get_relation_data(rel_id, harness.charm.unit.name)) + + # Test with TLS files available. + _handle_postgresql_restart_need.reset_mock() + harness.update_relation_data( + rel_id, harness.charm.unit.name, {"tls": ""} + ) # Mock some data in the relation to test that it change. + _is_tls_enabled.return_value = True + _render_patroni_yml_file.reset_mock() + harness.charm.update_config() + _render_patroni_yml_file.assert_called_once_with( + connectivity=True, + is_creating_backup=False, + enable_tls=True, + is_no_sync_member=False, + backup_id=None, + stanza=None, + restore_stanza=None, + parameters={"test": "test"}, + ) + _handle_postgresql_restart_need.assert_called_once() + tc.assertNotIn( + "tls", + harness.get_relation_data( + rel_id, harness.charm.unit.name + ), # The "tls" flag is set in handle_postgresql_restart_need. + ) - # Test with TLS files available. - _handle_postgresql_restart_need.reset_mock() - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"tls": ""} - ) # Mock some data in the relation to test that it change. 
- _is_tls_enabled.return_value = True - _render_patroni_yml_file.reset_mock() - self.charm.update_config() - _render_patroni_yml_file.assert_called_once_with( - connectivity=True, - is_creating_backup=False, - enable_tls=True, - is_no_sync_member=False, - backup_id=None, - stanza=None, - restore_stanza=None, - parameters={"test": "test"}, - ) - _handle_postgresql_restart_need.assert_called_once() - self.assertNotIn( - "tls", - self.harness.get_relation_data( - self.rel_id, self.charm.unit.name - ), # The "tls" flag is set in handle_postgresql_restart_need. - ) + # Test with workload not running yet. + harness.update_relation_data( + rel_id, harness.charm.unit.name, {"tls": ""} + ) # Mock some data in the relation to test that it change. + _handle_postgresql_restart_need.reset_mock() + harness.charm.update_config() + _handle_postgresql_restart_need.assert_not_called() + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.unit.name)["tls"], "enabled" + ) - # Test with workload not running yet. - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"tls": ""} - ) # Mock some data in the relation to test that it change. - _handle_postgresql_restart_need.reset_mock() - self.charm.update_config() - _handle_postgresql_restart_need.assert_not_called() - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.charm.unit.name)["tls"], "enabled" + # Test with member not started yet. + harness.update_relation_data( + rel_id, harness.charm.unit.name, {"tls": ""} + ) # Mock some data in the relation to test that it doesn't change. + harness.charm.update_config() + _handle_postgresql_restart_need.assert_not_called() + tc.assertNotIn("tls", harness.get_relation_data(rel_id, harness.charm.unit.name)) + + +def test_handle_postgresql_restart_need(harness): + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch("charms.rolling_ops.v0.rollingops.RollingOpsManager._on_acquire_lock") as _restart, + patch("charm.PostgresqlOperatorCharm._generate_metrics_jobs") as _generate_metrics_jobs, + patch("charm.wait_fixed", return_value=wait_fixed(0)), + patch("charm.Patroni.reload_patroni_configuration") as _reload_patroni_configuration, + patch( + "charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock + ) as _is_tls_enabled, + ): + rel_id = harness.model.get_relation(PEER).id + for values in itertools.product([True, False], [True, False], [True, False]): + _reload_patroni_configuration.reset_mock() + _generate_metrics_jobs.reset_mock() + _restart.reset_mock() + with harness.hooks_disabled(): + harness.update_relation_data(rel_id, harness.charm.unit.name, {"tls": ""}) + + _is_tls_enabled.return_value = values[0] + postgresql_mock.is_tls_enabled = PropertyMock(return_value=values[1]) + postgresql_mock.is_restart_pending = PropertyMock(return_value=values[2]) + + harness.charm._handle_postgresql_restart_need() + _reload_patroni_configuration.assert_called_once() + ( + tc.assertIn("tls", harness.get_relation_data(rel_id, harness.charm.unit)) + if values[0] + else tc.assertNotIn("tls", harness.get_relation_data(rel_id, harness.charm.unit)) ) + if (values[0] != values[1]) or values[2]: + _generate_metrics_jobs.assert_called_once_with(values[0]) + _restart.assert_called_once() + else: + _generate_metrics_jobs.assert_not_called() + _restart.assert_not_called() - # Test with member not started yet. 
- self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"tls": ""} - ) # Mock some data in the relation to test that it doesn't change. - self.charm.update_config() - _handle_postgresql_restart_need.assert_not_called() - self.assertNotIn( - "tls", self.harness.get_relation_data(self.rel_id, self.charm.unit.name) - ) - @patch("charms.rolling_ops.v0.rollingops.RollingOpsManager._on_acquire_lock") - @patch("charm.PostgresqlOperatorCharm._generate_metrics_jobs") - @patch("charm.wait_fixed", return_value=wait_fixed(0)) - @patch("charm.Patroni.reload_patroni_configuration") - @patch("charm.PostgresqlOperatorCharm.is_tls_enabled", new_callable=PropertyMock) - def test_handle_postgresql_restart_need( - self, _is_tls_enabled, _reload_patroni_configuration, _, _generate_metrics_jobs, _restart +def test_set_active_status(harness): + with ( + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.Patroni.get_primary") as _get_primary, ): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - for values in itertools.product([True, False], [True, False], [True, False]): - _reload_patroni_configuration.reset_mock() - _generate_metrics_jobs.reset_mock() - _restart.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, self.charm.unit.name, {"tls": ""} - ) - - _is_tls_enabled.return_value = values[0] - postgresql_mock.is_tls_enabled = PropertyMock(return_value=values[1]) - postgresql_mock.is_restart_pending = PropertyMock(return_value=values[2]) - - self.charm._handle_postgresql_restart_need() - _reload_patroni_configuration.assert_called_once() - ( - self.assertIn( - "tls", self.harness.get_relation_data(self.rel_id, self.charm.unit) - ) - if values[0] - else self.assertNotIn( - "tls", self.harness.get_relation_data(self.rel_id, self.charm.unit) - ) - ) - if (values[0] != values[1]) or values[2]: - _generate_metrics_jobs.assert_called_once_with(values[0]) - _restart.assert_called_once() - else: - _generate_metrics_jobs.assert_not_called() - _restart.assert_not_called() - - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.Patroni.get_primary") - def test_set_active_status(self, _get_primary, _member_started): for values in itertools.product( [ RetryError(last_attempt=1), ConnectionError, - self.charm.unit.name, - f"{self.charm.app.name}/2", + harness.charm.unit.name, + f"{harness.charm.app.name}/2", ], [True, False], ): - self.charm.unit.status = MaintenanceStatus("fake status") + harness.charm.unit.status = MaintenanceStatus("fake status") _member_started.return_value = values[1] if isinstance(values[0], str): _get_primary.side_effect = None _get_primary.return_value = values[0] - self.charm._set_active_status() - self.assertIsInstance( - self.charm.unit.status, + harness.charm._set_active_status() + tc.assertIsInstance( + harness.charm.unit.status, ActiveStatus - if values[0] == self.charm.unit.name or values[1] + if values[0] == harness.charm.unit.name or values[1] else MaintenanceStatus, ) - self.assertEqual( - self.charm.unit.status.message, + tc.assertEqual( + harness.charm.unit.status.message, "Primary" - if values[0] == self.charm.unit.name + if values[0] == harness.charm.unit.name else ("" if values[1] else "fake status"), ) else: _get_primary.side_effect = values[0] _get_primary.return_value = None - self.charm._set_active_status() - self.assertIsInstance(self.charm.unit.status, MaintenanceStatus) + 
harness.charm._set_active_status() + tc.assertIsInstance(harness.charm.unit.status, MaintenanceStatus) diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py index 502c1a759c..f4d9629a55 100644 --- a/tests/unit/test_db.py +++ b/tests/unit/test_db.py @@ -1,9 +1,10 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -import unittest +from unittest import TestCase from unittest.mock import Mock, PropertyMock, patch +import pytest from charms.postgresql_k8s.v0.postgresql import ( PostgreSQLCreateDatabaseError, PostgreSQLCreateUserError, @@ -21,149 +22,153 @@ RELATION_NAME = "db" POSTGRESQL_VERSION = "14" +# used for assert functions +tc = TestCase() -class TestDbProvides(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) + +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) # Set up the initial relation and hooks. - self.harness.set_leader(True) - self.harness.begin() - self.app = self.harness.charm.app.name - self.unit = self.harness.charm.unit.name + harness.set_leader(True) + harness.begin() # Define some relations. - self.rel_id = self.harness.add_relation(RELATION_NAME, "application") - self.harness.add_relation_unit(self.rel_id, "application/0") - self.peer_rel_id = self.harness.add_relation(PEER, self.app) - self.harness.add_relation_unit(self.peer_rel_id, f"{self.app}/1") - self.harness.add_relation_unit(self.peer_rel_id, self.unit) - self.harness.update_relation_data( - self.peer_rel_id, - self.app, + rel_id = harness.add_relation(RELATION_NAME, "application") + harness.add_relation_unit(rel_id, "application/0") + peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) + harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1") + harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) - self.legacy_db_relation = self.harness.charm.legacy_db_relation - - def clear_relation_data(self): - data = { - "allowed-subnets": "", - "allowed-units": "", - "host": "", - "port": "", - "master": "", - "standbys": "", - "version": "", - "user": "", - "password": "", - "database": "", - "extensions": "", - } - self.harness.update_relation_data(self.rel_id, self.app, data) - self.harness.update_relation_data(self.rel_id, self.unit, data) - - def request_database(self): - # Reset the charm status. - self.harness.model.unit.status = ActiveStatus() + yield harness + harness.cleanup() + + +def clear_relation_data(_harness): + data = { + "allowed-subnets": "", + "allowed-units": "", + "host": "", + "port": "", + "master": "", + "standbys": "", + "version": "", + "user": "", + "password": "", + "database": "", + "extensions": "", + } + rel_id = _harness.model.get_relation(RELATION_NAME).id + _harness.update_relation_data(rel_id, _harness.charm.app.name, data) + _harness.update_relation_data(rel_id, _harness.charm.unit.name, data) + + +def request_database(_harness): + # Reset the charm status. + _harness.model.unit.status = ActiveStatus() + rel_id = _harness.model.get_relation(RELATION_NAME).id + + with _harness.hooks_disabled(): + # Reset the application databag. 
+ _harness.update_relation_data( + rel_id, + "application/0", + {"database": ""}, + ) - with self.harness.hooks_disabled(): - # Reset the application databag. - self.harness.update_relation_data( - self.rel_id, - "application/0", - {"database": ""}, - ) + # Reset the database databag. + clear_relation_data(_harness) - # Reset the database databag. - self.clear_relation_data() + # Simulate the request of a new database. + _harness.update_relation_data( + rel_id, + "application/0", + {"database": DATABASE}, + ) - # Simulate the request of a new database. - self.harness.update_relation_data( - self.rel_id, - "application/0", - {"database": DATABASE}, - ) - @patch("charm.DbProvides.set_up_relation") - @patch.object(EventBase, "defer") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - def test_on_relation_changed( - self, - _member_started, - _defer, - _set_up_relation, +def test_on_relation_changed(harness): + with ( + patch("charm.DbProvides.set_up_relation") as _set_up_relation, + patch.object(EventBase, "defer") as _defer, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, ): + peer_rel_id = harness.model.get_relation(PEER).id # Set some side effects to test multiple situations. _member_started.side_effect = [False, False, True, True] # Request a database before the cluster is initialised. - self.request_database() + request_database(harness) _defer.assert_called_once() _set_up_relation.assert_not_called() # Request a database before the database is ready. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, - self.harness.charm.app.name, + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) - self.request_database() - self.assertEqual(_defer.call_count, 2) + request_database(harness) + tc.assertEqual(_defer.call_count, 2) _set_up_relation.assert_not_called() # Request a database to a non leader unit. _defer.reset_mock() - with self.harness.hooks_disabled(): - self.harness.set_leader(False) - self.request_database() + with harness.hooks_disabled(): + harness.set_leader(False) + request_database(harness) _defer.assert_not_called() _set_up_relation.assert_not_called() # Request it again in a leader unit. - with self.harness.hooks_disabled(): - self.harness.set_leader() - self.request_database() + with harness.hooks_disabled(): + harness.set_leader() + request_database(harness) _defer.assert_not_called() _set_up_relation.assert_called_once() - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def test_get_extensions(self): + +def test_get_extensions(harness): + with patch("charm.KubernetesServicePatch", lambda x, y: None): # Test when there are no extensions in the relation databags. - relation = self.harness.model.get_relation(RELATION_NAME, self.rel_id) - self.assertEqual( - self.harness.charm.legacy_db_relation._get_extensions(relation), ([], set()) - ) + rel_id = harness.model.get_relation(RELATION_NAME).id + relation = harness.model.get_relation(RELATION_NAME, rel_id) + tc.assertEqual(harness.charm.legacy_db_relation._get_extensions(relation), ([], set())) # Test when there are extensions in the application relation databag. 
extensions = ["", "citext:public", "debversion"] - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, "application", {"extensions": ",".join(extensions)}, ) - self.assertEqual( - self.harness.charm.legacy_db_relation._get_extensions(relation), + tc.assertEqual( + harness.charm.legacy_db_relation._get_extensions(relation), ([extensions[1], extensions[2]], {extensions[1].split(":")[0], extensions[2]}), ) # Test when there are extensions in the unit relation databag. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, "application", {"extensions": ""}, ) - self.harness.update_relation_data( - self.rel_id, + harness.update_relation_data( + rel_id, "application/0", {"extensions": ",".join(extensions)}, ) - self.assertEqual( - self.harness.charm.legacy_db_relation._get_extensions(relation), + tc.assertEqual( + harness.charm.legacy_db_relation._get_extensions(relation), ([extensions[1], extensions[2]], {extensions[1].split(":")[0], extensions[2]}), ) @@ -176,321 +181,332 @@ def test_get_extensions(self): default: false type: boolean""" harness = Harness(PostgresqlOperatorCharm, config=config) - self.addCleanup(harness.cleanup) + harness.cleanup() harness.begin() - self.assertEqual( + tc.assertEqual( harness.charm.legacy_db_relation._get_extensions(relation), ([extensions[1], extensions[2]], {extensions[2]}), ) - @patch("relations.db.DbProvides._update_unit_status") - @patch("relations.db.new_password", return_value="test-password") - @patch("relations.db.DbProvides._get_extensions") - def test_set_up_relation( - self, - _get_extensions, - _new_password, - _update_unit_status, + +def test_set_up_relation(harness): + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch("relations.db.DbProvides._update_unit_status") as _update_unit_status, + patch("relations.db.new_password", return_value="test-password") as _new_password, + patch("relations.db.DbProvides._get_extensions") as _get_extensions, ): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Define some mocks' side effects. - extensions = ["citext:public", "debversion"] - _get_extensions.side_effect = [ - (extensions, {"debversion"}), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), + rel_id = harness.model.get_relation(RELATION_NAME).id + # Define some mocks' side effects. 
+ extensions = ["citext:public", "debversion"] + _get_extensions.side_effect = [ + (extensions, {"debversion"}), + (extensions, set()), + (extensions, set()), + (extensions, set()), + (extensions, set()), + (extensions, set()), + (extensions, set()), + ] + postgresql_mock.create_user = PropertyMock( + side_effect=[None, None, PostgreSQLCreateUserError, None, None] + ) + postgresql_mock.create_database = PropertyMock( + side_effect=[None, None, PostgreSQLCreateDatabaseError, None] + ) + postgresql_mock.get_postgresql_version = PropertyMock( + side_effect=[ + POSTGRESQL_VERSION, + POSTGRESQL_VERSION, + POSTGRESQL_VERSION, + POSTGRESQL_VERSION, + POSTGRESQL_VERSION, + PostgreSQLGetPostgreSQLVersionError, ] - postgresql_mock.create_user = PropertyMock( - side_effect=[None, None, PostgreSQLCreateUserError, None, None] + ) + + # Assert no operation is done when at least one of the requested extensions + # is disabled. + relation = harness.model.get_relation(RELATION_NAME, rel_id) + tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + postgresql_mock.create_user.assert_not_called() + postgresql_mock.create_database.assert_not_called() + postgresql_mock.get_postgresql_version.assert_not_called() + _update_unit_status.assert_not_called() + + # Assert that the correct calls were made in a successful setup. + harness.charm.unit.status = ActiveStatus() + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"database": DATABASE}, ) - postgresql_mock.create_database = PropertyMock( - side_effect=[None, None, PostgreSQLCreateDatabaseError, None] + tc.assertTrue(harness.charm.legacy_db_relation.set_up_relation(relation)) + user = f"relation_id_{rel_id}" + postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) + postgresql_mock.create_database.assert_called_once_with( + DATABASE, user, plugins=[], client_relations=[relation] + ) + tc.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) + _update_unit_status.assert_called_once() + expected_data = { + "allowed-units": "application/0", + "database": DATABASE, + "extensions": ",".join(extensions), + "host": f"postgresql-k8s-0.postgresql-k8s-endpoints.{harness.model.name}.svc.cluster.local", + "master": f"dbname={DATABASE} fallback_application_name=application " + f"host=postgresql-k8s-primary.{harness.model.name}.svc.cluster.local " + f"password=test-password port=5432 user=relation_id_{rel_id}", + "password": "test-password", + "port": DATABASE_PORT, + "standbys": f"dbname={DATABASE} fallback_application_name=application " + f"host=postgresql-k8s-replicas.{harness.model.name}.svc.cluster.local " + f"password=test-password port=5432 user=relation_id_{rel_id}", + "user": f"relation_id_{rel_id}", + "version": POSTGRESQL_VERSION, + } + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), expected_data) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), expected_data) + tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + + # Assert that the correct calls were made when the database name is + # provided only in the unit databag. 
+ postgresql_mock.create_user.reset_mock() + postgresql_mock.create_database.reset_mock() + postgresql_mock.get_postgresql_version.reset_mock() + _update_unit_status.reset_mock() + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"database": ""}, ) - postgresql_mock.get_postgresql_version = PropertyMock( - side_effect=[ - POSTGRESQL_VERSION, - POSTGRESQL_VERSION, - POSTGRESQL_VERSION, - POSTGRESQL_VERSION, - POSTGRESQL_VERSION, - PostgreSQLGetPostgreSQLVersionError, - ] + harness.update_relation_data( + rel_id, + "application/0", + {"database": DATABASE}, ) - - # Assert no operation is done when at least one of the requested extensions - # is disabled. - relation = self.harness.model.get_relation(RELATION_NAME, self.rel_id) - self.assertFalse(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - postgresql_mock.create_user.assert_not_called() - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - - # Assert that the correct calls were made in a successful setup. - self.harness.charm.unit.status = ActiveStatus() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": DATABASE}, - ) - self.assertTrue(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - user = f"relation_id_{self.rel_id}" - postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) - postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=[], client_relations=[relation] + clear_relation_data(harness) + tc.assertTrue(harness.charm.legacy_db_relation.set_up_relation(relation)) + postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) + postgresql_mock.create_database.assert_called_once_with( + DATABASE, user, plugins=[], client_relations=[relation] + ) + tc.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) + _update_unit_status.assert_called_once() + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), expected_data) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), expected_data) + tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + + # Assert that the correct calls were made when the database name is not provided. 
+ postgresql_mock.create_user.reset_mock() + postgresql_mock.create_database.reset_mock() + postgresql_mock.get_postgresql_version.reset_mock() + _update_unit_status.reset_mock() + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application/0", + {"database": ""}, ) - self.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) - _update_unit_status.assert_called_once() - expected_data = { - "allowed-units": "application/0", - "database": DATABASE, - "extensions": ",".join(extensions), - "host": f"postgresql-k8s-0.postgresql-k8s-endpoints.{self.harness.model.name}.svc.cluster.local", - "master": f"dbname={DATABASE} fallback_application_name=application " - f"host=postgresql-k8s-primary.{self.harness.model.name}.svc.cluster.local " - f"password=test-password port=5432 user=relation_id_{self.rel_id}", - "password": "test-password", - "port": DATABASE_PORT, - "standbys": f"dbname={DATABASE} fallback_application_name=application " - f"host=postgresql-k8s-replicas.{self.harness.model.name}.svc.cluster.local " - f"password=test-password port=5432 user=relation_id_{self.rel_id}", - "user": f"relation_id_{self.rel_id}", - "version": POSTGRESQL_VERSION, - } - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.app), expected_data) - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.unit), expected_data) - self.assertNotIsInstance(self.harness.model.unit.status, BlockedStatus) - - # Assert that the correct calls were made when the database name is - # provided only in the unit databag. - postgresql_mock.create_user.reset_mock() - postgresql_mock.create_database.reset_mock() - postgresql_mock.get_postgresql_version.reset_mock() - _update_unit_status.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": ""}, - ) - self.harness.update_relation_data( - self.rel_id, - "application/0", - {"database": DATABASE}, - ) - self.clear_relation_data() - self.assertTrue(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) - postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=[], client_relations=[relation] + clear_relation_data(harness) + tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + postgresql_mock.create_user.assert_not_called() + postgresql_mock.create_database.assert_not_called() + postgresql_mock.get_postgresql_version.assert_not_called() + _update_unit_status.assert_not_called() + # No data is set in the databags by the database. + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) + tc.assertNotIsInstance(harness.model.unit.status, BlockedStatus) + + # BlockedStatus due to a PostgreSQLCreateUserError. + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"database": DATABASE}, ) - self.assertEqual(postgresql_mock.get_postgresql_version.call_count, 2) - _update_unit_status.assert_called_once() - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.app), expected_data) - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.unit), expected_data) - self.assertNotIsInstance(self.harness.model.unit.status, BlockedStatus) - - # Assert that the correct calls were made when the database name is not provided. 
- postgresql_mock.create_user.reset_mock() - postgresql_mock.create_database.reset_mock() - postgresql_mock.get_postgresql_version.reset_mock() - _update_unit_status.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - "application/0", - {"database": ""}, - ) - self.clear_relation_data() - self.assertFalse(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - postgresql_mock.create_user.assert_not_called() - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - # No data is set in the databags by the database. - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.app), {}) - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.unit), {}) - self.assertNotIsInstance(self.harness.model.unit.status, BlockedStatus) - - # BlockedStatus due to a PostgreSQLCreateUserError. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": DATABASE}, - ) - self.assertFalse(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) - # No data is set in the databags by the database. - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.app), {}) - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.unit), {}) - - # BlockedStatus due to a PostgreSQLCreateDatabaseError. - self.harness.charm.unit.status = ActiveStatus() - self.assertFalse(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) - # No data is set in the databags by the database. - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.app), {}) - self.assertEqual(self.harness.get_relation_data(self.rel_id, self.unit), {}) - - # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. - self.harness.charm.unit.status = ActiveStatus() - self.assertFalse(self.harness.charm.legacy_db_relation.set_up_relation(relation)) - _update_unit_status.assert_not_called() - self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) - - @patch("relations.db.DbProvides._check_for_blocking_relations") - @patch("charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock) - def test_update_unit_status(self, _has_blocked_status, _check_for_blocking_relations): + tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + postgresql_mock.create_database.assert_not_called() + postgresql_mock.get_postgresql_version.assert_not_called() + _update_unit_status.assert_not_called() + tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + # No data is set in the databags by the database. + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) + + # BlockedStatus due to a PostgreSQLCreateDatabaseError. 
+ harness.charm.unit.status = ActiveStatus() + tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + postgresql_mock.get_postgresql_version.assert_not_called() + _update_unit_status.assert_not_called() + tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + # No data is set in the databags by the database. + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.app.name), {}) + tc.assertEqual(harness.get_relation_data(rel_id, harness.charm.unit.name), {}) + + # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. + harness.charm.unit.status = ActiveStatus() + tc.assertFalse(harness.charm.legacy_db_relation.set_up_relation(relation)) + _update_unit_status.assert_not_called() + tc.assertIsInstance(harness.model.unit.status, BlockedStatus) + + +def test_update_unit_status(harness): + with ( + patch( + "relations.db.DbProvides._check_for_blocking_relations" + ) as _check_for_blocking_relations, + patch( + "charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock + ) as _has_blocked_status, + ): + rel_id = harness.model.get_relation(RELATION_NAME).id # Test when the charm is not blocked. - relation = self.harness.model.get_relation(RELATION_NAME, self.rel_id) + relation = harness.model.get_relation(RELATION_NAME, rel_id) _has_blocked_status.return_value = False - self.harness.charm.legacy_db_relation._update_unit_status(relation) + harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_not_called() - self.assertNotIsInstance(self.harness.charm.unit.status, ActiveStatus) + tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) # Test when the charm is blocked but not due to extensions request. _has_blocked_status.return_value = True - self.harness.charm.unit.status = BlockedStatus("fake message") - self.harness.charm.legacy_db_relation._update_unit_status(relation) + harness.charm.unit.status = BlockedStatus("fake message") + harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_not_called() - self.assertNotIsInstance(self.harness.charm.unit.status, ActiveStatus) + tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) # Test when there are relations causing the blocked status. - self.harness.charm.unit.status = BlockedStatus("extensions requested through relation") + harness.charm.unit.status = BlockedStatus("extensions requested through relation") _check_for_blocking_relations.return_value = True - self.harness.charm.legacy_db_relation._update_unit_status(relation) + harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_called_once_with(relation.id) - self.assertNotIsInstance(self.harness.charm.unit.status, ActiveStatus) + tc.assertNotIsInstance(harness.charm.unit.status, ActiveStatus) # Test when there are no relations causing the blocked status anymore. 
_check_for_blocking_relations.reset_mock() _check_for_blocking_relations.return_value = False - self.harness.charm.legacy_db_relation._update_unit_status(relation) + harness.charm.legacy_db_relation._update_unit_status(relation) _check_for_blocking_relations.assert_called_once_with(relation.id) - self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) + tc.assertIsInstance(harness.charm.unit.status, ActiveStatus) - @patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)) - def test_on_relation_departed(self, _): + +def test_on_relation_departed(harness): + with patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)): # Test when this unit is departing the relation (due to a scale down event). - self.assertNotIn( - "departing", self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - ) + peer_rel_id = harness.model.get_relation(PEER).id + tc.assertNotIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) event = Mock() - event.relation.data = {self.harness.charm.app: {}, self.harness.charm.unit: {}} - event.departing_unit = self.harness.charm.unit - self.harness.charm.legacy_db_relation._on_relation_departed(event) - self.assertIn( - "departing", self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - ) + event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} + event.departing_unit = harness.charm.unit + harness.charm.legacy_db_relation._on_relation_departed(event) + tc.assertIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) # Test when this unit is departing the relation (due to the relation being broken between the apps). - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, self.harness.charm.unit.name, {"departing": ""} - ) - event.relation.data = {self.harness.charm.app: {}, self.harness.charm.unit: {}} - event.departing_unit = Unit( - f"{self.harness.charm.app}/1", None, self.harness.charm.app._backend, {} - ) - self.harness.charm.legacy_db_relation._on_relation_departed(event) - relation_data = self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - self.assertNotIn("departing", relation_data) - - @patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)) - def test_on_relation_broken(self, _member_started): - with self.harness.hooks_disabled(): - self.harness.set_leader() + with harness.hooks_disabled(): + harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"departing": ""}) + event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} + event.departing_unit = Unit(f"{harness.charm.app}/1", None, harness.charm.app._backend, {}) + harness.charm.legacy_db_relation._on_relation_departed(event) + relation_data = harness.get_relation_data(peer_rel_id, harness.charm.unit) + tc.assertNotIn("departing", relation_data) + + +def test_on_relation_broken(harness): + with patch( + "charm.Patroni.member_started", new_callable=PropertyMock(return_value=True) + ) as _member_started: + rel_id = harness.model.get_relation(RELATION_NAME).id + peer_rel_id = harness.model.get_relation(PEER).id + with harness.hooks_disabled(): + harness.set_leader() with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: # Test when this unit is departing the relation (due to the relation being broken between the apps). 
event = Mock() - event.relation.id = self.rel_id - self.harness.charm.legacy_db_relation._on_relation_broken(event) - user = f"relation_id_{self.rel_id}" + event.relation.id = rel_id + harness.charm.legacy_db_relation._on_relation_broken(event) + user = f"relation_id_{rel_id}" postgresql_mock.delete_user.assert_called_once_with(user) # Test when this unit is departing the relation (due to a scale down event). postgresql_mock.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, self.harness.charm.unit.name, {"departing": "True"} + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, harness.charm.unit.name, {"departing": "True"} ) - self.harness.charm.legacy_db_relation._on_relation_broken(event) + harness.charm.legacy_db_relation._on_relation_broken(event) postgresql_mock.delete_user.assert_not_called() - @patch( - "charm.PostgresqlOperatorCharm.primary_endpoint", - new_callable=PropertyMock, - ) - @patch("charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock) - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.DbProvides._on_relation_departed") - def test_on_relation_broken_extensions_unblock( - self, _on_relation_departed, _member_started, _primary_endpoint, _has_blocked_status - ): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Set some side effects to test multiple situations. - _has_blocked_status.return_value = True - _member_started.return_value = True - _primary_endpoint.return_value = {"1.1.1.1"} - postgresql_mock.delete_user = PropertyMock(return_value=None) - self.harness.model.unit.status = BlockedStatus("extensions requested through relation") - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": DATABASE, "extensions": "test"}, - ) - # Break the relation that blocked the charm. - self.harness.remove_relation(self.rel_id) - self.assertTrue(isinstance(self.harness.model.unit.status, ActiveStatus)) +def test_on_relation_broken_extensions_unblock(harness): + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch( + "charm.PostgresqlOperatorCharm.primary_endpoint", + new_callable=PropertyMock, + ) as _primary_endpoint, + patch( + "charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock + ) as _has_blocked_status, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.DbProvides._on_relation_departed") as _on_relation_departed, + ): + rel_id = harness.model.get_relation(RELATION_NAME).id + # Set some side effects to test multiple situations. 
+ _has_blocked_status.return_value = True + _member_started.return_value = True + _primary_endpoint.return_value = {"1.1.1.1"} + postgresql_mock.delete_user = PropertyMock(return_value=None) + harness.model.unit.status = BlockedStatus("extensions requested through relation") + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"database": DATABASE, "extensions": "test"}, + ) - @patch( - "charm.PostgresqlOperatorCharm.primary_endpoint", - new_callable=PropertyMock, - ) - @patch("charm.PostgresqlOperatorCharm.is_blocked", new_callable=PropertyMock) - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - @patch("charm.DbProvides._on_relation_departed") - def test_on_relation_broken_extensions_keep_block( - self, _on_relation_departed, _member_started, _primary_endpoint, is_blocked + # Break the relation that blocked the charm. + harness.remove_relation(rel_id) + tc.assertTrue(isinstance(harness.model.unit.status, ActiveStatus)) + + +def test_on_relation_broken_extensions_keep_block(harness): + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch( + "charm.PostgresqlOperatorCharm.primary_endpoint", + new_callable=PropertyMock, + ) as _primary_endpoint, + patch("charm.PostgresqlOperatorCharm.is_blocked", new_callable=PropertyMock) as is_blocked, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.DbProvides._on_relation_departed") as _on_relation_departed, ): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Set some side effects to test multiple situations. - is_blocked.return_value = True - _member_started.return_value = True - _primary_endpoint.return_value = {"1.1.1.1"} - postgresql_mock.delete_user = PropertyMock(return_value=None) - self.harness.model.unit.status = BlockedStatus( - "extensions requested through relation, enable them through config options" + # Set some side effects to test multiple situations. + is_blocked.return_value = True + _member_started.return_value = True + _primary_endpoint.return_value = {"1.1.1.1"} + postgresql_mock.delete_user = PropertyMock(return_value=None) + harness.model.unit.status = BlockedStatus( + "extensions requested through relation, enable them through config options" + ) + with harness.hooks_disabled(): + first_rel_id = harness.add_relation(RELATION_NAME, "application1") + harness.update_relation_data( + first_rel_id, + "application1", + {"database": DATABASE, "extensions": "test"}, + ) + second_rel_id = harness.add_relation(RELATION_NAME, "application2") + harness.update_relation_data( + second_rel_id, + "application2", + {"database": DATABASE, "extensions": "test"}, ) - with self.harness.hooks_disabled(): - first_rel_id = self.harness.add_relation(RELATION_NAME, "application1") - self.harness.update_relation_data( - first_rel_id, - "application1", - {"database": DATABASE, "extensions": "test"}, - ) - second_rel_id = self.harness.add_relation(RELATION_NAME, "application2") - self.harness.update_relation_data( - second_rel_id, - "application2", - {"database": DATABASE, "extensions": "test"}, - ) - event = Mock() - event.relation.id = first_rel_id - # Break one of the relations that block the charm. - self.harness.charm.legacy_db_relation._on_relation_broken(event) - self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus)) + event = Mock() + event.relation.id = first_rel_id + # Break one of the relations that block the charm. 
+ harness.charm.legacy_db_relation._on_relation_broken(event) + tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) diff --git a/tests/unit/test_patroni.py b/tests/unit/test_patroni.py index 982ad9920e..126a82cbe8 100644 --- a/tests/unit/test_patroni.py +++ b/tests/unit/test_patroni.py @@ -2,9 +2,10 @@ # Copyright 2021 Canonical Ltd. # See LICENSE file for licensing details. -import unittest +from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, mock_open, patch +import pytest import tenacity from jinja2 import Template from ops.testing import Harness @@ -13,21 +14,27 @@ from charm import PostgresqlOperatorCharm from constants import REWIND_USER from patroni import Patroni, SwitchoverFailedError -from tests.helpers import STORAGE_PATH, patch_network_get +from tests.helpers import STORAGE_PATH +# used for assert functions +tc = TestCase() -class TestPatroni(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - @patch_network_get(private_address="1.1.1.1") - def setUp(self): - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) - self.harness.begin() - self.charm = self.harness.charm +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) + harness.begin() + yield harness + harness.cleanup() + + +@pytest.fixture(autouse=True) +def patroni(harness): + with patch("charm.KubernetesServicePatch", lambda x, y: None): # Setup Patroni wrapper. - self.patroni = Patroni( - self.charm, + patroni = Patroni( + harness.charm, "postgresql-k8s-0", ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"], "postgresql-k8s-primary.dev.svc.cluster.local", @@ -38,9 +45,11 @@ def setUp(self): "rewind-password", False, ) + yield patroni + - @patch("requests.get") - def test_get_primary(self, _get): +def test_get_primary(harness, patroni): + with patch("requests.get") as _get: # Mock Patroni cluster API. _get.return_value.json.return_value = { "members": [ @@ -51,18 +60,19 @@ def test_get_primary(self, _get): } # Test returning pod name. - primary = self.patroni.get_primary() - self.assertEqual(primary, "postgresql-k8s-1") + primary = patroni.get_primary() + tc.assertEqual(primary, "postgresql-k8s-1") _get.assert_called_once_with("http://postgresql-k8s-0:8008/cluster", verify=True) # Test returning unit name. _get.reset_mock() - primary = self.patroni.get_primary(unit_name_pattern=True) - self.assertEqual(primary, "postgresql-k8s/1") + primary = patroni.get_primary(unit_name_pattern=True) + tc.assertEqual(primary, "postgresql-k8s/1") _get.assert_called_once_with("http://postgresql-k8s-0:8008/cluster", verify=True) - @patch("requests.get") - def test_is_creating_backup(self, _get): + +def test_is_creating_backup(harness, patroni): + with patch("requests.get") as _get: # Test when one member is creating a backup. response = _get.return_value response.json.return_value = { @@ -71,21 +81,24 @@ def test_is_creating_backup(self, _get): {"name": "postgresql-k8s-1", "tags": {"is_creating_backup": True}}, ] } - self.assertTrue(self.patroni.is_creating_backup) + tc.assertTrue(patroni.is_creating_backup) # Test when no member is creating a backup. 
response.json.return_value = { "members": [{"name": "postgresql-k8s-0"}, {"name": "postgresql-k8s-1"}] } - self.assertFalse(self.patroni.is_creating_backup) + tc.assertFalse(patroni.is_creating_backup) + - @patch("requests.get") - @patch("charm.Patroni.get_primary") - @patch("patroni.stop_after_delay", return_value=stop_after_delay(0)) - def test_is_replication_healthy(self, _, __, _get): +def test_is_replication_healthy(harness, patroni): + with ( + patch("requests.get") as _get, + patch("charm.Patroni.get_primary"), + patch("patroni.stop_after_delay", return_value=stop_after_delay(0)), + ): # Test when replication is healthy. _get.return_value.status_code = 200 - self.assertTrue(self.patroni.is_replication_healthy) + tc.assertTrue(patroni.is_replication_healthy) # Test when replication is not healthy. _get.side_effect = [ @@ -93,31 +106,37 @@ def test_is_replication_healthy(self, _, __, _get): MagicMock(status_code=200), MagicMock(status_code=503), ] - self.assertFalse(self.patroni.is_replication_healthy) + tc.assertFalse(patroni.is_replication_healthy) + - @patch("requests.get") - @patch("patroni.stop_after_delay", return_value=stop_after_delay(0)) - def test_member_streaming(self, _, _get): +def test_member_streaming(harness, patroni): + with ( + patch("requests.get") as _get, + patch("patroni.stop_after_delay", return_value=stop_after_delay(0)), + ): # Test when the member is streaming from primary. _get.return_value.json.return_value = {"replication_state": "streaming"} - self.assertTrue(self.patroni.member_streaming) + tc.assertTrue(patroni.member_streaming) # Test when the member is not streaming from primary. _get.return_value.json.return_value = {"replication_state": "running"} - self.assertFalse(self.patroni.member_streaming) + tc.assertFalse(patroni.member_streaming) _get.return_value.json.return_value = {} - self.assertFalse(self.patroni.member_streaming) + tc.assertFalse(patroni.member_streaming) # Test when an error happens. _get.side_effect = RetryError - self.assertFalse(self.patroni.member_streaming) + tc.assertFalse(patroni.member_streaming) - @patch("os.chmod") - @patch("os.chown") - @patch("pwd.getpwnam") - @patch("tempfile.NamedTemporaryFile") - def test_render_file(self, _temp_file, _pwnam, _chown, _chmod): + +def test_render_file(harness, patroni): + with ( + patch("os.chmod") as _chmod, + patch("os.chown") as _chown, + patch("pwd.getpwnam") as _pwnam, + patch("tempfile.NamedTemporaryFile") as _temp_file, + ): # Set a mocked temporary filename. filename = "/tmp/temporaryfilename" _temp_file.return_value.name = filename @@ -129,10 +148,10 @@ def test_render_file(self, _temp_file, _pwnam, _chown, _chmod): _pwnam.return_value.pw_uid = 35 _pwnam.return_value.pw_gid = 35 # Call the method using a temporary configuration file. - self.patroni._render_file(filename, "rendered-content", 0o640) + patroni._render_file(filename, "rendered-content", 0o640) # Check the rendered file is opened with "w+" mode. - self.assertEqual(mock.call_args_list[0][0], (filename, "w+")) + tc.assertEqual(mock.call_args_list[0][0], (filename, "w+")) # Ensure that the correct user is lookup up. _pwnam.assert_called_with("postgres") # Ensure the file is chmod'd correctly. @@ -140,24 +159,29 @@ def test_render_file(self, _temp_file, _pwnam, _chown, _chmod): # Ensure the file is chown'd correctly. 
_chown.assert_called_with(filename, uid=35, gid=35) - @patch("charm.Patroni.rock_postgresql_version", new_callable=PropertyMock) - @patch("charm.Patroni._render_file") - def test_render_patroni_yml_file(self, _render_file, _rock_postgresql_version): + +def test_render_patroni_yml_file(harness, patroni): + with ( + patch( + "charm.Patroni.rock_postgresql_version", new_callable=PropertyMock + ) as _rock_postgresql_version, + patch("charm.Patroni._render_file") as _render_file, + ): _rock_postgresql_version.return_value = "14.7" # Get the expected content from a file. with open("templates/patroni.yml.j2") as file: template = Template(file.read()) expected_content = template.render( - endpoint=self.patroni._endpoint, - endpoints=self.patroni._endpoints, - namespace=self.patroni._namespace, - storage_path=self.patroni._storage_path, - superuser_password=self.patroni._superuser_password, - replication_password=self.patroni._replication_password, + endpoint=patroni._endpoint, + endpoints=patroni._endpoints, + namespace=patroni._namespace, + storage_path=patroni._storage_path, + superuser_password=patroni._superuser_password, + replication_password=patroni._replication_password, rewind_user=REWIND_USER, - rewind_password=self.patroni._rewind_password, - minority_count=self.patroni._members_count // 2, + rewind_password=patroni._rewind_password, + minority_count=patroni._members_count // 2, version="14", ) @@ -168,10 +192,10 @@ def test_render_patroni_yml_file(self, _render_file, _rock_postgresql_version): # Patch the `open` method with our mock. with patch("builtins.open", mock, create=True): # Call the method - self.patroni.render_patroni_yml_file(enable_tls=False) + patroni.render_patroni_yml_file(enable_tls=False) # Check the template is opened read-only in the call to open. - self.assertEqual(mock.call_args_list[0][0], ("templates/patroni.yml.j2", "r")) + tc.assertEqual(mock.call_args_list[0][0], ("templates/patroni.yml.j2", "r")) # Ensure the correct rendered template is sent to _render_file method. _render_file.assert_called_once_with( f"{STORAGE_PATH}/patroni.yml", @@ -183,23 +207,23 @@ def test_render_patroni_yml_file(self, _render_file, _rock_postgresql_version): _render_file.reset_mock() expected_content_with_tls = template.render( enable_tls=True, - endpoint=self.patroni._endpoint, - endpoints=self.patroni._endpoints, - namespace=self.patroni._namespace, - storage_path=self.patroni._storage_path, - superuser_password=self.patroni._superuser_password, - replication_password=self.patroni._replication_password, + endpoint=patroni._endpoint, + endpoints=patroni._endpoints, + namespace=patroni._namespace, + storage_path=patroni._storage_path, + superuser_password=patroni._superuser_password, + replication_password=patroni._replication_password, rewind_user=REWIND_USER, - rewind_password=self.patroni._rewind_password, - minority_count=self.patroni._members_count // 2, + rewind_password=patroni._rewind_password, + minority_count=patroni._members_count // 2, version="14", ) - self.assertNotEqual(expected_content_with_tls, expected_content) + tc.assertNotEqual(expected_content_with_tls, expected_content) # Patch the `open` method with our mock. with patch("builtins.open", mock, create=True): # Call the method - self.patroni.render_patroni_yml_file(enable_tls=True) + patroni.render_patroni_yml_file(enable_tls=True) # Ensure the correct rendered template is sent to _render_file method. 
_render_file.assert_called_once_with( @@ -210,41 +234,45 @@ def test_render_patroni_yml_file(self, _render_file, _rock_postgresql_version): # Also, ensure the right parameters are in the expected content # (as it was already validated with the above render file call). - self.assertIn("ssl: on", expected_content_with_tls) - self.assertIn("ssl_ca_file: /var/lib/postgresql/data/ca.pem", expected_content_with_tls) - self.assertIn( - "ssl_cert_file: /var/lib/postgresql/data/cert.pem", expected_content_with_tls - ) - self.assertIn("ssl_key_file: /var/lib/postgresql/data/key.pem", expected_content_with_tls) - - @patch("patroni.stop_after_delay", return_value=stop_after_delay(0)) - @patch("patroni.wait_fixed", return_value=wait_fixed(0)) - @patch("requests.get") - def test_primary_endpoint_ready(self, _get, _, __): + tc.assertIn("ssl: on", expected_content_with_tls) + tc.assertIn("ssl_ca_file: /var/lib/postgresql/data/ca.pem", expected_content_with_tls) + tc.assertIn("ssl_cert_file: /var/lib/postgresql/data/cert.pem", expected_content_with_tls) + tc.assertIn("ssl_key_file: /var/lib/postgresql/data/key.pem", expected_content_with_tls) + + +def test_primary_endpoint_ready(harness, patroni): + with ( + patch("patroni.stop_after_delay", return_value=stop_after_delay(0)), + patch("patroni.wait_fixed", return_value=wait_fixed(0)), + patch("requests.get") as _get, + ): # Test with an issue when trying to connect to the Patroni API. _get.side_effect = RetryError - self.assertFalse(self.patroni.primary_endpoint_ready) + tc.assertFalse(patroni.primary_endpoint_ready) # Mock the request return values. _get.side_effect = None _get.return_value.json.return_value = {"state": "stopped"} # Test with the primary endpoint not ready yet. - self.assertFalse(self.patroni.primary_endpoint_ready) + tc.assertFalse(patroni.primary_endpoint_ready) # Test with the primary endpoint ready. _get.return_value.json.return_value = {"state": "running"} - self.assertTrue(self.patroni.primary_endpoint_ready) + tc.assertTrue(patroni.primary_endpoint_ready) + - @patch("patroni.stop_after_delay", return_value=tenacity.stop_after_delay(0)) - @patch("requests.post") - @patch("patroni.Patroni.get_primary") - def test_switchover(self, _get_primary, _post, __): +def test_switchover(harness, patroni): + with ( + patch("patroni.stop_after_delay", return_value=tenacity.stop_after_delay(0)), + patch("requests.post") as _post, + patch("patroni.Patroni.get_primary") as _get_primary, + ): # Test a successful switchover. _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-1"] response = _post.return_value response.status_code = 200 - self.patroni.switchover() + patroni.switchover() _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": None}, @@ -254,7 +282,7 @@ def test_switchover(self, _get_primary, _post, __): # Test a successful switchover with a candidate name. _post.reset_mock() _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-2"] - self.patroni.switchover("postgresql-k8s/2") + patroni.switchover("postgresql-k8s/2") _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"}, @@ -264,8 +292,8 @@ def test_switchover(self, _get_primary, _post, __): # Test failed switchovers. 
_post.reset_mock() _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-1"] - with self.assertRaises(SwitchoverFailedError): - self.patroni.switchover("postgresql-k8s/2") + with tc.assertRaises(SwitchoverFailedError): + patroni.switchover("postgresql-k8s/2") _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"}, @@ -275,8 +303,8 @@ def test_switchover(self, _get_primary, _post, __): _post.reset_mock() _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-2"] response.status_code = 400 - with self.assertRaises(SwitchoverFailedError): - self.patroni.switchover("postgresql-k8s/2") + with tc.assertRaises(SwitchoverFailedError): + patroni.switchover("postgresql-k8s/2") _post.assert_called_once_with( "http://postgresql-k8s-0:8008/switchover", json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"}, diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py index da54adfeab..d1adae5015 100644 --- a/tests/unit/test_postgresql.py +++ b/tests/unit/test_postgresql.py @@ -1,9 +1,10 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -import unittest +from unittest import TestCase from unittest.mock import call, patch import psycopg2 +import pytest from charms.postgresql_k8s.v0.postgresql import PostgreSQLCreateDatabaseError from ops.testing import Harness from psycopg2.sql import SQL, Composed, Identifier @@ -11,43 +12,48 @@ from charm import PostgresqlOperatorCharm from constants import PEER +# used for assert functions +tc = TestCase() -class TestPostgreSQL(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) + +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) # Set up the initial relation and hooks. - self.peer_rel_id = self.harness.add_relation(PEER, "postgresql-k8s") - self.harness.add_relation_unit(self.peer_rel_id, "postgresql-k8s/0") - self.harness.begin() - self.charm = self.harness.charm + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() - @patch("charms.postgresql_k8s.v0.postgresql.PostgreSQL.enable_disable_extensions") - @patch( - "charms.postgresql_k8s.v0.postgresql.PostgreSQL._generate_database_privileges_statements" - ) - @patch("charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database") - def test_create_database( - self, - _connect_to_database, - _generate_database_privileges_statements, - _enable_disable_extensions, + +def test_create_database(harness): + with ( + patch( + "charms.postgresql_k8s.v0.postgresql.PostgreSQL.enable_disable_extensions" + ) as _enable_disable_extensions, + patch( + "charms.postgresql_k8s.v0.postgresql.PostgreSQL._generate_database_privileges_statements" + ) as _generate_database_privileges_statements, + patch( + "charms.postgresql_k8s.v0.postgresql.PostgreSQL._connect_to_database" + ) as _connect_to_database, ): # Test a successful database creation. 
database = "test_database" user = "test_user" plugins = ["test_plugin_1", "test_plugin_2"] - with self.harness.hooks_disabled(): - rel_id = self.harness.add_relation("database", "application") - self.harness.add_relation_unit(rel_id, "application/0") - self.harness.update_relation_data(rel_id, "application", {"database": database}) - database_relation = self.harness.model.get_relation("database") + with harness.hooks_disabled(): + rel_id = harness.add_relation("database", "application") + harness.add_relation_unit(rel_id, "application/0") + harness.update_relation_data(rel_id, "application", {"database": database}) + database_relation = harness.model.get_relation("database") client_relations = [database_relation] schemas = [("test_schema_1",), ("test_schema_2",)] _connect_to_database.return_value.__enter__.return_value.cursor.return_value.__enter__.return_value.fetchall.return_value = schemas - self.charm.postgresql.create_database(database, user, plugins, client_relations) + harness.charm.postgresql.create_database(database, user, plugins, client_relations) execute = _connect_to_database.return_value.cursor.return_value.execute execute.assert_has_calls([ call( @@ -131,15 +137,13 @@ def test_create_database( # Test when two relations request the same database. _connect_to_database.reset_mock() _generate_database_privileges_statements.reset_mock() - with self.harness.hooks_disabled(): - other_rel_id = self.harness.add_relation("database", "other-application") - self.harness.add_relation_unit(other_rel_id, "other-application/0") - self.harness.update_relation_data( - other_rel_id, "other-application", {"database": database} - ) - other_database_relation = self.harness.model.get_relation("database", other_rel_id) + with harness.hooks_disabled(): + other_rel_id = harness.add_relation("database", "other-application") + harness.add_relation_unit(other_rel_id, "other-application/0") + harness.update_relation_data(other_rel_id, "other-application", {"database": database}) + other_database_relation = harness.model.get_relation("database", other_rel_id) client_relations = [database_relation, other_database_relation] - self.charm.postgresql.create_database(database, user, plugins, client_relations) + harness.charm.postgresql.create_database(database, user, plugins, client_relations) _generate_database_privileges_statements.assert_called_once_with( 2, [schemas[0][0], schemas[1][0]], user ) @@ -147,153 +151,155 @@ def test_create_database( # Test a failed database creation. _enable_disable_extensions.reset_mock() execute.side_effect = psycopg2.Error - with self.assertRaises(PostgreSQLCreateDatabaseError): - self.charm.postgresql.create_database(database, user, plugins, client_relations) + with tc.assertRaises(PostgreSQLCreateDatabaseError): + harness.charm.postgresql.create_database(database, user, plugins, client_relations) _enable_disable_extensions.assert_not_called() - def test_generate_database_privileges_statements(self): - # Test with only one established relation. 
- self.assertEqual( - self.charm.postgresql._generate_database_privileges_statements( - 1, ["test_schema_1", "test_schema_2"], "test_user" - ), - [ - Composed([ - SQL( - "DO $$\nDECLARE r RECORD;\nBEGIN\n FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '.\"' || tablename ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema')\nUNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '.\"' || sequence_name ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema')\nUNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema')\nUNION SELECT 4 AS index,'ALTER VIEW '|| schemaname || '.\"' || viewname ||'\" OWNER TO " - ), - Identifier("test_user"), - SQL( - ";' AS statement\nFROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP\n EXECUTE format(r.statement);\n END LOOP;\nEND; $$;" - ), - ]), - "UPDATE pg_catalog.pg_largeobject_metadata\nSET lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'test_user')\nWHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'operator');", - ], - ) - # Test with multiple established relations. - self.assertEqual( - self.charm.postgresql._generate_database_privileges_statements( - 2, ["test_schema_1", "test_schema_2"], "test_user" - ), - [ - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), - Identifier("test_schema_1"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - Composed([ - SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), - Identifier("test_schema_2"), - SQL(" TO "), - Identifier("test_user"), - SQL(";"), - ]), - ], - ) - def test_build_postgresql_parameters(self): - # Test when not limit is imposed to the available memory. 
- config_options = { - "durability_test_config_option_1": True, - "instance_test_config_option_2": False, - "logging_test_config_option_3": "on", - "memory_test_config_option_4": 1024, - "optimizer_test_config_option_5": "scheduled", - "other_test_config_option_6": "test-value", - "profile": "production", - "request_date_style": "ISO, DMY", - "request_time_zone": "UTC", - "request_test_config_option_7": "off", - "response_test_config_option_8": "partial", - "vacuum_test_config_option_9": 10.5, - } - self.assertEqual( - self.charm.postgresql.build_postgresql_parameters(config_options, 1000000000), - { - "test_config_option_1": True, - "test_config_option_2": False, - "test_config_option_3": "on", - "test_config_option_4": 1024, - "test_config_option_5": "scheduled", - "test_config_option_7": "off", - "DateStyle": "ISO, DMY", - "TimeZone": "UTC", - "test_config_option_8": "partial", - "test_config_option_9": 10.5, - "shared_buffers": "250MB", - "effective_cache_size": "750MB", - }, - ) +def test_generate_database_privileges_statements(harness): + # Test with only one established relation. + tc.assertEqual( + harness.charm.postgresql._generate_database_privileges_statements( + 1, ["test_schema_1", "test_schema_2"], "test_user" + ), + [ + Composed([ + SQL( + "DO $$\nDECLARE r RECORD;\nBEGIN\n FOR r IN (SELECT statement FROM (SELECT 1 AS index,'ALTER TABLE '|| schemaname || '.\"' || tablename ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_tables WHERE NOT schemaname IN ('pg_catalog', 'information_schema')\nUNION SELECT 2 AS index,'ALTER SEQUENCE '|| sequence_schema || '.\"' || sequence_name ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM information_schema.sequences WHERE NOT sequence_schema IN ('pg_catalog', 'information_schema')\nUNION SELECT 3 AS index,'ALTER FUNCTION '|| nsp.nspname || '.\"' || p.proname ||'\"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_proc p JOIN pg_namespace nsp ON p.pronamespace = nsp.oid WHERE NOT nsp.nspname IN ('pg_catalog', 'information_schema')\nUNION SELECT 4 AS index,'ALTER VIEW '|| schemaname || '.\"' || viewname ||'\" OWNER TO " + ), + Identifier("test_user"), + SQL( + ";' AS statement\nFROM pg_catalog.pg_views WHERE NOT schemaname IN ('pg_catalog', 'information_schema')) AS statements ORDER BY index) LOOP\n EXECUTE format(r.statement);\n END LOOP;\nEND; $$;" + ), + ]), + "UPDATE pg_catalog.pg_largeobject_metadata\nSET lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'test_user')\nWHERE lomowner = (SELECT oid FROM pg_roles WHERE rolname = 'operator');", + ], + ) + # Test with multiple established relations. 
+ tc.assertEqual( + harness.charm.postgresql._generate_database_privileges_statements( + 2, ["test_schema_1", "test_schema_2"], "test_user" + ), + [ + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), + Identifier("test_schema_1"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + Composed([ + SQL("GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA "), + Identifier("test_schema_2"), + SQL(" TO "), + Identifier("test_user"), + SQL(";"), + ]), + ], + ) - # Test with a limited imposed to the available memory. - parameters = self.charm.postgresql.build_postgresql_parameters( - config_options, 1000000000, 600000000 - ) - self.assertEqual(parameters["shared_buffers"], "150MB") - self.assertEqual(parameters["effective_cache_size"], "450MB") - # Test when the requested shared buffers are greater than 40% of the available memory. - config_options["memory_shared_buffers"] = 50001 - with self.assertRaises(Exception): - self.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) +def test_build_postgresql_parameters(harness): + # Test when not limit is imposed to the available memory. + config_options = { + "durability_test_config_option_1": True, + "instance_test_config_option_2": False, + "logging_test_config_option_3": "on", + "memory_test_config_option_4": 1024, + "optimizer_test_config_option_5": "scheduled", + "other_test_config_option_6": "test-value", + "profile": "production", + "request_date_style": "ISO, DMY", + "request_time_zone": "UTC", + "request_test_config_option_7": "off", + "response_test_config_option_8": "partial", + "vacuum_test_config_option_9": 10.5, + } + tc.assertEqual( + harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000), + { + "test_config_option_1": True, + "test_config_option_2": False, + "test_config_option_3": "on", + "test_config_option_4": 1024, + "test_config_option_5": "scheduled", + "test_config_option_7": "off", + "DateStyle": "ISO, DMY", + "TimeZone": "UTC", + "test_config_option_8": "partial", + "test_config_option_9": 10.5, + "shared_buffers": "250MB", + "effective_cache_size": "750MB", + }, + ) + + # Test with a limited imposed to the available memory. + parameters = harness.charm.postgresql.build_postgresql_parameters( + config_options, 1000000000, 600000000 + ) + tc.assertEqual(parameters["shared_buffers"], "150MB") + tc.assertEqual(parameters["effective_cache_size"], "450MB") + + # Test when the requested shared buffers are greater than 40% of the available memory. + config_options["memory_shared_buffers"] = 50001 + with tc.assertRaises(Exception): + harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - # Test when the requested shared buffers are lower than 40% of the available memory - # (also check that it's used when calculating the effective cache size value). 
- config_options["memory_shared_buffers"] = 50000 - parameters = self.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - self.assertEqual(parameters["shared_buffers"], 50000) - self.assertEqual(parameters["effective_cache_size"], "600MB") + # Test when the requested shared buffers are lower than 40% of the available memory + # (also check that it's used when calculating the effective cache size value). + config_options["memory_shared_buffers"] = 50000 + parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) + tc.assertEqual(parameters["shared_buffers"], 50000) + tc.assertEqual(parameters["effective_cache_size"], "600MB") - # Test when the profile is set to "testing". - config_options["profile"] = "testing" - parameters = self.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - self.assertEqual(parameters["shared_buffers"], 50000) - self.assertNotIn("effective_cache_size", parameters) + # Test when the profile is set to "testing". + config_options["profile"] = "testing" + parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) + tc.assertEqual(parameters["shared_buffers"], 50000) + tc.assertNotIn("effective_cache_size", parameters) - # Test when there is no shared_buffers value set in the config option. - del config_options["memory_shared_buffers"] - parameters = self.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) - self.assertEqual(parameters["shared_buffers"], "128MB") - self.assertNotIn("effective_cache_size", parameters) + # Test when there is no shared_buffers value set in the config option. + del config_options["memory_shared_buffers"] + parameters = harness.charm.postgresql.build_postgresql_parameters(config_options, 1000000000) + tc.assertEqual(parameters["shared_buffers"], "128MB") + tc.assertNotIn("effective_cache_size", parameters) diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index bf8e5351b3..38acb26b37 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -1,9 +1,10 @@ # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. -import unittest +from unittest import TestCase from unittest.mock import Mock, PropertyMock, patch +import pytest from charms.postgresql_k8s.v0.postgresql import ( PostgreSQLCreateDatabaseError, PostgreSQLCreateUserError, @@ -23,187 +24,199 @@ RELATION_NAME = "database" POSTGRESQL_VERSION = "14" +# used for assert functions +tc = TestCase() -@patch_network_get(private_address="1.1.1.1") -class TestPostgreSQLProvider(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) + +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) # Set up the initial relation and hooks. - self.harness.set_leader(True) - self.harness.begin() - self.app = self.harness.charm.app.name - self.unit = self.harness.charm.unit.name + harness.set_leader(True) + harness.begin() # Define some relations. 
- self.rel_id = self.harness.add_relation(RELATION_NAME, "application") - self.harness.add_relation_unit(self.rel_id, "application/0") - self.peer_rel_id = self.harness.add_relation(PEER, self.app) - self.harness.add_relation_unit(self.peer_rel_id, self.unit) - self.harness.update_relation_data( - self.peer_rel_id, - self.app, + rel_id = harness.add_relation(RELATION_NAME, "application") + harness.add_relation_unit(rel_id, "application/0") + peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) + harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, {"cluster_initialised": "True"}, ) - self.provider = self.harness.charm.postgresql_client_relation + yield harness + harness.cleanup() - def request_database(self): - # Reset the charm status. - self.harness.model.unit.status = ActiveStatus() - # Reset the application databag. - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": "", "extra-user-roles": ""}, - ) +def request_database(_harness): + # Reset the charm status. + _harness.model.unit.status = ActiveStatus() + rel_id = _harness.model.get_relation(RELATION_NAME).id - # Reset the database databag. - self.harness.update_relation_data( - self.rel_id, - self.app, - {"data": "", "username": "", "password": "", "version": "", "database": ""}, - ) + # Reset the application databag. + _harness.update_relation_data( + rel_id, + "application", + {"database": "", "extra-user-roles": ""}, + ) + + # Reset the database databag. + _harness.update_relation_data( + rel_id, + _harness.charm.app.name, + {"data": "", "username": "", "password": "", "version": "", "database": ""}, + ) - # Simulate the request of a new database plus extra user roles. - self.harness.update_relation_data( - self.rel_id, - "application", - {"database": DATABASE, "extra-user-roles": EXTRA_USER_ROLES}, + # Simulate the request of a new database plus extra user roles. + _harness.update_relation_data( + rel_id, + "application", + {"database": DATABASE, "extra-user-roles": EXTRA_USER_ROLES}, + ) + + +@patch_network_get(private_address="1.1.1.1") +def test_on_database_requested(harness): + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch.object(EventBase, "defer") as _defer, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch( + "relations.postgresql_provider.new_password", return_value="test-password" + ) as _new_password, + ): + rel_id = harness.model.get_relation(RELATION_NAME).id + # Set some side effects to test multiple situations. + _member_started.side_effect = [False, True, True, True, True, True] + postgresql_mock.create_user = PropertyMock( + side_effect=[None, PostgreSQLCreateUserError, None, None] + ) + postgresql_mock.create_database = PropertyMock( + side_effect=[None, PostgreSQLCreateDatabaseError, None] + ) + postgresql_mock.get_postgresql_version = PropertyMock( + side_effect=[ + POSTGRESQL_VERSION, + PostgreSQLGetPostgreSQLVersionError, + ] ) - @patch("relations.postgresql_provider.new_password", return_value="test-password") - @patch.object(EventBase, "defer") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - def test_on_database_requested(self, _member_started, _defer, _new_password): - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Set some side effects to test multiple situations. 
- _member_started.side_effect = [False, True, True, True, True, True] - postgresql_mock.create_user = PropertyMock( - side_effect=[None, PostgreSQLCreateUserError, None, None] - ) - postgresql_mock.create_database = PropertyMock( - side_effect=[None, PostgreSQLCreateDatabaseError, None] - ) - postgresql_mock.get_postgresql_version = PropertyMock( - side_effect=[ - POSTGRESQL_VERSION, - PostgreSQLGetPostgreSQLVersionError, - ] - ) + # Request a database before the database is ready. + request_database(harness) + _defer.assert_called_once() - # Request a database before the database is ready. - self.request_database() - _defer.assert_called_once() + # Request it again when the database is ready. + request_database(harness) - # Request it again when the database is ready. - self.request_database() + # Assert that the correct calls were made. + user = f"relation_id_{rel_id}" + postgresql_mock.create_user.assert_called_once_with( + user, "test-password", extra_user_roles=EXTRA_USER_ROLES + ) + database_relation = harness.model.get_relation(RELATION_NAME) + client_relations = [database_relation] + postgresql_mock.create_database.assert_called_once_with( + DATABASE, user, plugins=[], client_relations=client_relations + ) + postgresql_mock.get_postgresql_version.assert_called_once() + + # Assert that the relation data was updated correctly. + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.app.name), + { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "username": user, + "password": "test-password", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + "version": POSTGRESQL_VERSION, + "database": f"{DATABASE}", + }, + ) - # Assert that the correct calls were made. - user = f"relation_id_{self.rel_id}" - postgresql_mock.create_user.assert_called_once_with( - user, "test-password", extra_user_roles=EXTRA_USER_ROLES - ) - database_relation = self.harness.model.get_relation(RELATION_NAME) - client_relations = [database_relation] - postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=[], client_relations=client_relations - ) - postgresql_mock.get_postgresql_version.assert_called_once() - - # Assert that the relation data was updated correctly. - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.app), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "username": user, - "password": "test-password", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - "version": POSTGRESQL_VERSION, - "database": f"{DATABASE}", - }, - ) + # Assert no BlockedStatus was set. + tc.assertFalse(isinstance(harness.model.unit.status, BlockedStatus)) + + # BlockedStatus due to a PostgreSQLCreateUserError. + request_database(harness) + tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + # No data is set in the databag by the database. + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.app.name), + { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + }, + ) - # Assert no BlockedStatus was set. 
- self.assertFalse(isinstance(self.harness.model.unit.status, BlockedStatus)) - - # BlockedStatus due to a PostgreSQLCreateUserError. - self.request_database() - self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus)) - # No data is set in the databag by the database. - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.app), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - }, - ) + # BlockedStatus due to a PostgreSQLCreateDatabaseError. + request_database(harness) + tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) + # No data is set in the databag by the database. + tc.assertEqual( + harness.get_relation_data(rel_id, harness.charm.app.name), + { + "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', + "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", + "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", + }, + ) - # BlockedStatus due to a PostgreSQLCreateDatabaseError. - self.request_database() - self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus)) - # No data is set in the databag by the database. - self.assertEqual( - self.harness.get_relation_data(self.rel_id, self.app), - { - "data": f'{{"database": "{DATABASE}", "extra-user-roles": "{EXTRA_USER_ROLES}"}}', - "endpoints": "postgresql-k8s-primary.None.svc.cluster.local:5432", - "read-only-endpoints": "postgresql-k8s-replicas.None.svc.cluster.local:5432", - }, - ) + # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. + request_database(harness) + tc.assertTrue(isinstance(harness.model.unit.status, BlockedStatus)) - # BlockedStatus due to a PostgreSQLGetPostgreSQLVersionError. - self.request_database() - self.assertTrue(isinstance(self.harness.model.unit.status, BlockedStatus)) - @patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)) - def test_on_relation_departed(self, _): +@patch_network_get(private_address="1.1.1.1") +def test_on_relation_departed(harness): + with patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)): + peer_rel_id = harness.model.get_relation(PEER).id # Test when this unit is departing the relation (due to a scale down event). - self.assertNotIn( - "departing", self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - ) + tc.assertNotIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) event = Mock() - event.relation.data = {self.harness.charm.app: {}, self.harness.charm.unit: {}} - event.departing_unit = self.harness.charm.unit - self.harness.charm.postgresql_client_relation._on_relation_departed(event) - self.assertIn( - "departing", self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - ) + event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} + event.departing_unit = harness.charm.unit + harness.charm.postgresql_client_relation._on_relation_departed(event) + tc.assertIn("departing", harness.get_relation_data(peer_rel_id, harness.charm.unit)) # Test when this unit is departing the relation (due to the relation being broken between the apps). 
- with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, self.harness.charm.unit.name, {"departing": ""} + with harness.hooks_disabled(): + harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"departing": ""}) + event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}} + event.departing_unit = Unit(f"{harness.charm.app}/1", None, harness.charm.app._backend, {}) + harness.charm.postgresql_client_relation._on_relation_departed(event) + relation_data = harness.get_relation_data(peer_rel_id, harness.charm.unit) + tc.assertNotIn("departing", relation_data) + + +@patch_network_get(private_address="1.1.1.1") +def test_on_relation_broken(harness): + with harness.hooks_disabled(): + harness.set_leader() + with ( + patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, + patch( + "charm.Patroni.member_started", new_callable=PropertyMock(return_value=True) + ) as _member_started, + ): + rel_id = harness.model.get_relation(RELATION_NAME).id + peer_rel_id = harness.model.get_relation(PEER).id + # Test when this unit is departing the relation (due to the relation being broken between the apps). + event = Mock() + event.relation.id = rel_id + harness.charm.postgresql_client_relation._on_relation_broken(event) + user = f"relation_id_{rel_id}" + postgresql_mock.delete_user.assert_called_once_with(user) + + # Test when this unit is departing the relation (due to a scale down event). + postgresql_mock.reset_mock() + with harness.hooks_disabled(): + harness.update_relation_data( + peer_rel_id, harness.charm.unit.name, {"departing": "True"} ) - event.relation.data = {self.harness.charm.app: {}, self.harness.charm.unit: {}} - event.departing_unit = Unit( - f"{self.harness.charm.app}/1", None, self.harness.charm.app._backend, {} - ) - self.harness.charm.postgresql_client_relation._on_relation_departed(event) - relation_data = self.harness.get_relation_data(self.peer_rel_id, self.harness.charm.unit) - self.assertNotIn("departing", relation_data) - - @patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)) - def test_on_relation_broken(self, _member_started): - with self.harness.hooks_disabled(): - self.harness.set_leader() - with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock: - # Test when this unit is departing the relation (due to the relation being broken between the apps). - event = Mock() - event.relation.id = self.rel_id - self.harness.charm.postgresql_client_relation._on_relation_broken(event) - user = f"relation_id_{self.rel_id}" - postgresql_mock.delete_user.assert_called_once_with(user) - - # Test when this unit is departing the relation (due to a scale down event). - postgresql_mock.reset_mock() - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.peer_rel_id, self.harness.charm.unit.name, {"departing": "True"} - ) - self.harness.charm.postgresql_client_relation._on_relation_broken(event) - postgresql_mock.delete_user.assert_not_called() + harness.charm.postgresql_client_relation._on_relation_broken(event) + postgresql_mock.delete_user.assert_not_called() diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py index ab7265c276..3fe00b324d 100644 --- a/tests/unit/test_postgresql_tls.py +++ b/tests/unit/test_postgresql_tls.py @@ -2,9 +2,10 @@ # See LICENSE file for licensing details. 
import base64 import socket -import unittest +from unittest import TestCase from unittest.mock import MagicMock, call, patch +import pytest from ops.pebble import ConnectionError from ops.testing import Harness @@ -15,94 +16,100 @@ RELATION_NAME = "certificates" SCOPE = "unit" +# used for assert functions +tc = TestCase() -class TestPostgreSQLTLS(unittest.TestCase): - def delete_secrets(self) -> None: - # Delete TLS secrets from the secret store. - self.charm.set_secret(SCOPE, "ca", None) - self.charm.set_secret(SCOPE, "cert", None) - self.charm.set_secret(SCOPE, "chain", None) - def emit_certificate_available_event(self) -> None: - self.charm.tls.certs.on.certificate_available.emit( - certificate_signing_request="test-csr", - certificate="test-cert", - ca="test-ca", - chain=["test-chain-ca-certificate", "test-chain-certificate"], - ) - - def emit_certificate_expiring_event(self) -> None: - self.charm.tls.certs.on.certificate_expiring.emit(certificate="test-cert", expiry=None) - - @staticmethod - def get_content_from_file(filename: str) -> str: - with open(filename, "r") as file: - content = file.read() - return content - - def no_secrets(self, include_certificate: bool = True) -> bool: - # Check whether there is no TLS secrets in the secret store. - secrets = [self.charm.get_secret(SCOPE, "ca"), self.charm.get_secret(SCOPE, "chain")] - if include_certificate: - secrets.append(self.charm.get_secret(SCOPE, "cert")) - return all(secret is None for secret in secrets) - - def relate_to_tls_certificates_operator(self) -> int: - # Relate the charm to the TLS certificates operator. - rel_id = self.harness.add_relation(RELATION_NAME, "tls-certificates-operator") - self.harness.add_relation_unit(rel_id, "tls-certificates-operator/0") - return rel_id - - def set_secrets(self) -> None: - # Set some TLS secrets in the secret store. - self.charm.set_secret(SCOPE, "ca", "test-ca") - self.charm.set_secret(SCOPE, "cert", "test-cert") - self.charm.set_secret(SCOPE, "chain", "test-chain") - - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): - self.harness = Harness(PostgresqlOperatorCharm) - self.addCleanup(self.harness.cleanup) +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): + harness = Harness(PostgresqlOperatorCharm) # Set up the initial relation and hooks. - self.peer_rel_id = self.harness.add_relation(PEER, "postgresql-k8s") - self.harness.add_relation_unit(self.peer_rel_id, "postgresql-k8s/0") - self.harness.begin() - self.charm = self.harness.charm + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() + + +def delete_secrets(_harness): + # Delete TLS secrets from the secret store. 
+ _harness.charm.set_secret(SCOPE, "ca", None) + _harness.charm.set_secret(SCOPE, "cert", None) + _harness.charm.set_secret(SCOPE, "chain", None) + + +def emit_certificate_available_event(_harness): + _harness.charm.tls.certs.on.certificate_available.emit( + certificate_signing_request="test-csr", + certificate="test-cert", + ca="test-ca", + chain=["test-chain-ca-certificate", "test-chain-certificate"], + ) + + +def emit_certificate_expiring_event(_harness): + _harness.charm.tls.certs.on.certificate_expiring.emit(certificate="test-cert", expiry=None) + - @patch("charms.postgresql_k8s.v0.postgresql_tls.PostgreSQLTLS._request_certificate") - def test_on_set_tls_private_key(self, _request_certificate): +def get_content_from_file(filename: str): + with open(filename, "r") as file: + content = file.read() + return content + + +def no_secrets(_harness, include_certificate: bool = True): + # Check whether there is no TLS secrets in the secret store. + secrets = [_harness.charm.get_secret(SCOPE, "ca"), _harness.charm.get_secret(SCOPE, "chain")] + if include_certificate: + secrets.append(_harness.charm.get_secret(SCOPE, "cert")) + return all(secret is None for secret in secrets) + + +def relate_to_tls_certificates_operator(_harness): + # Relate the charm to the TLS certificates operator. + rel_id = _harness.add_relation(RELATION_NAME, "tls-certificates-operator") + _harness.add_relation_unit(rel_id, "tls-certificates-operator/0") + return rel_id + + +def test_on_set_tls_private_key(harness): + with patch( + "charms.postgresql_k8s.v0.postgresql_tls.PostgreSQLTLS._request_certificate" + ) as _request_certificate: # Create a mock event. mock_event = MagicMock(params={}) # Test without providing a private key. - self.charm.tls._on_set_tls_private_key(mock_event) + harness.charm.tls._on_set_tls_private_key(mock_event) _request_certificate.assert_called_once_with(None) # Test providing the private key. mock_event.params["private-key"] = "test-key" _request_certificate.reset_mock() - self.charm.tls._on_set_tls_private_key(mock_event) + harness.charm.tls._on_set_tls_private_key(mock_event) _request_certificate.assert_called_once_with("test-key") - @patch_network_get(private_address="1.1.1.1") - @patch( - "charms.tls_certificates_interface.v2.tls_certificates.TLSCertificatesRequiresV2.request_certificate_creation" - ) - @patch( - "charms.postgresql_k8s.v0.postgresql_tls.generate_csr", - return_value=b"fake CSR", - ) - @patch( - "charms.postgresql_k8s.v0.postgresql_tls.generate_private_key", - return_value=b"fake private key", - ) - def test_request_certificate( - self, _generate_private_key, _generate_csr, _request_certificate_creation + +@patch_network_get(private_address="1.1.1.1") +def test_request_certificate(harness): + with ( + patch( + "charms.tls_certificates_interface.v2.tls_certificates.TLSCertificatesRequiresV2.request_certificate_creation" + ) as _request_certificate_creation, + patch( + "charms.postgresql_k8s.v0.postgresql_tls.generate_csr", + return_value=b"fake CSR", + ) as _generate_csr, + patch( + "charms.postgresql_k8s.v0.postgresql_tls.generate_private_key", + return_value=b"fake private key", + ) as _generate_private_key, ): # Test without an established relation. 
- self.delete_secrets() - self.charm.tls._request_certificate(None) + delete_secrets(harness) + harness.charm.tls._request_certificate(None) generate_csr_call = call( private_key=b"fake private key", subject="postgresql-k8s-0.postgresql-k8s-endpoints", @@ -112,30 +119,30 @@ def test_request_certificate( "postgresql-k8s-0.postgresql-k8s-endpoints", socket.getfqdn(), "1.1.1.1", - f"postgresql-k8s-primary.{self.charm.model.name}.svc.cluster.local", - f"postgresql-k8s-replicas.{self.charm.model.name}.svc.cluster.local", + f"postgresql-k8s-primary.{harness.charm.model.name}.svc.cluster.local", + f"postgresql-k8s-replicas.{harness.charm.model.name}.svc.cluster.local", ], ) _generate_csr.assert_has_calls([generate_csr_call]) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "key")) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "csr")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) _request_certificate_creation.assert_not_called() # Test without providing a private key. _generate_csr.reset_mock() - with self.harness.hooks_disabled(): - self.relate_to_tls_certificates_operator() - self.charm.tls._request_certificate(None) + with harness.hooks_disabled(): + relate_to_tls_certificates_operator(harness) + harness.charm.tls._request_certificate(None) _generate_csr.assert_has_calls([generate_csr_call]) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "key")) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "csr")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) _request_certificate_creation.assert_called_once() # Test providing a private key. _generate_csr.reset_mock() _request_certificate_creation.reset_mock() - key = self.get_content_from_file(filename="tests/unit/key.pem") - self.charm.tls._request_certificate(key) + key = get_content_from_file(filename="tests/unit/key.pem") + harness.charm.tls._request_certificate(key) custom_key_generate_csr_call = call( private_key=key.encode("utf-8"), subject="postgresql-k8s-0.postgresql-k8s-endpoints", @@ -145,115 +152,130 @@ def test_request_certificate( "postgresql-k8s-0.postgresql-k8s-endpoints", socket.getfqdn(), "1.1.1.1", - f"postgresql-k8s-primary.{self.charm.model.name}.svc.cluster.local", - f"postgresql-k8s-replicas.{self.charm.model.name}.svc.cluster.local", + f"postgresql-k8s-primary.{harness.charm.model.name}.svc.cluster.local", + f"postgresql-k8s-replicas.{harness.charm.model.name}.svc.cluster.local", ], ) _generate_csr.assert_has_calls([custom_key_generate_csr_call]) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "key")) - self.assertIsNotNone(self.charm.get_secret(SCOPE, "csr")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "key")) + tc.assertIsNotNone(harness.charm.get_secret(SCOPE, "csr")) _request_certificate_creation.assert_called_once() - def test_parse_tls_file(self): - # Test with a plain text key. - key = self.get_content_from_file(filename="tests/unit/key.pem") - parsed_key = self.charm.tls._parse_tls_file(key) - self.assertEqual(parsed_key, key.encode("utf-8")) - # Test with a base64 encoded key. - key = self.get_content_from_file(filename="tests/unit/key.pem") - parsed_key = self.charm.tls._parse_tls_file( - base64.b64encode(key.encode("utf-8")).decode("utf-8") - ) - self.assertEqual(parsed_key, key.encode("utf-8")) +def test_parse_tls_file(harness): + # Test with a plain text key. 
+ key = get_content_from_file(filename="tests/unit/key.pem") + parsed_key = harness.charm.tls._parse_tls_file(key) + tc.assertEqual(parsed_key, key.encode("utf-8")) + + # Test with a base64 encoded key. + key = get_content_from_file(filename="tests/unit/key.pem") + parsed_key = harness.charm.tls._parse_tls_file( + base64.b64encode(key.encode("utf-8")).decode("utf-8") + ) + tc.assertEqual(parsed_key, key.encode("utf-8")) - @patch("charms.postgresql_k8s.v0.postgresql_tls.PostgreSQLTLS._request_certificate") - def test_on_tls_relation_joined(self, _request_certificate): - self.relate_to_tls_certificates_operator() + +def test_on_tls_relation_joined(harness): + with patch( + "charms.postgresql_k8s.v0.postgresql_tls.PostgreSQLTLS._request_certificate" + ) as _request_certificate: + relate_to_tls_certificates_operator(harness) _request_certificate.assert_called_once_with(None) - @patch_network_get(private_address="1.1.1.1") - @patch("charm.PostgresqlOperatorCharm.update_config") - def test_on_tls_relation_broken(self, _update_config): + +@patch_network_get(private_address="1.1.1.1") +def test_on_tls_relation_broken(harness): + with patch("charm.PostgresqlOperatorCharm.update_config") as _update_config: _update_config.reset_mock() - rel_id = self.relate_to_tls_certificates_operator() - self.harness.remove_relation(rel_id) + rel_id = relate_to_tls_certificates_operator(harness) + harness.remove_relation(rel_id) _update_config.assert_called_once() - self.assertTrue(self.no_secrets()) + tc.assertTrue(no_secrets(harness)) - @patch("ops.framework.EventBase.defer") - @patch("charm.PostgresqlOperatorCharm.push_tls_files_to_workload") - def test_on_certificate_available(self, _push_tls_files_to_workload, _defer): + +def test_on_certificate_available(harness): + with ( + patch("ops.framework.EventBase.defer") as _defer, + patch( + "charm.PostgresqlOperatorCharm.push_tls_files_to_workload" + ) as _push_tls_files_to_workload, + ): # Test with no provided or invalid CSR. - self.emit_certificate_available_event() - self.assertTrue(self.no_secrets()) + emit_certificate_available_event(harness) + tc.assertTrue(no_secrets(harness)) _push_tls_files_to_workload.assert_not_called() # Test providing CSR. 
- self.charm.set_secret(SCOPE, "csr", "test-csr\n") - self.emit_certificate_available_event() - self.assertEqual(self.charm.get_secret(SCOPE, "ca"), "test-ca") - self.assertEqual(self.charm.get_secret(SCOPE, "cert"), "test-cert") - self.assertEqual( - self.charm.get_secret(SCOPE, "chain"), + harness.charm.set_secret(SCOPE, "csr", "test-csr\n") + emit_certificate_available_event(harness) + tc.assertEqual(harness.charm.get_secret(SCOPE, "ca"), "test-ca") + tc.assertEqual(harness.charm.get_secret(SCOPE, "cert"), "test-cert") + tc.assertEqual( + harness.charm.get_secret(SCOPE, "chain"), "test-chain-ca-certificate\ntest-chain-certificate", ) _push_tls_files_to_workload.assert_called_once() _defer.assert_not_called() _push_tls_files_to_workload.side_effect = ConnectionError - self.emit_certificate_available_event() + emit_certificate_available_event(harness) _defer.assert_called_once() - @patch_network_get(private_address="1.1.1.1") - @patch( - "charms.tls_certificates_interface.v2.tls_certificates.TLSCertificatesRequiresV2.request_certificate_renewal" - ) - def test_on_certificate_expiring(self, _request_certificate_renewal): + +@patch_network_get(private_address="1.1.1.1") +def test_on_certificate_expiring(harness): + with ( + patch( + "charms.tls_certificates_interface.v2.tls_certificates.TLSCertificatesRequiresV2.request_certificate_renewal" + ) as _request_certificate_renewal, + ): # Test with no provided or invalid certificate. - self.emit_certificate_expiring_event() - self.assertTrue(self.no_secrets()) + emit_certificate_expiring_event(harness) + tc.assertTrue(no_secrets(harness)) # Test providing a certificate. - self.charm.set_secret( - SCOPE, "key", self.get_content_from_file(filename="tests/unit/key.pem") + harness.charm.set_secret( + SCOPE, "key", get_content_from_file(filename="tests/unit/key.pem") ) - self.charm.set_secret(SCOPE, "cert", "test-cert\n") - self.charm.set_secret(SCOPE, "csr", "test-csr") - self.emit_certificate_expiring_event() - self.assertTrue(self.no_secrets(include_certificate=False)) + harness.charm.set_secret(SCOPE, "cert", "test-cert\n") + harness.charm.set_secret(SCOPE, "csr", "test-csr") + emit_certificate_expiring_event(harness) + tc.assertTrue(no_secrets(harness, include_certificate=False)) _request_certificate_renewal.assert_called_once() - @patch_network_get(private_address="1.1.1.1") - def test_get_sans(self): - sans = self.charm.tls._get_sans() - self.assertEqual( - sans, - { - "sans_ip": ["1.1.1.1"], - "sans_dns": [ - "postgresql-k8s-0", - "postgresql-k8s-0.postgresql-k8s-endpoints", - socket.getfqdn(), - "1.1.1.1", - "postgresql-k8s-primary.None.svc.cluster.local", - "postgresql-k8s-replicas.None.svc.cluster.local", - ], - }, - ) - def test_get_tls_files(self): - # Test with no TLS files available. - key, ca, certificate = self.charm.tls.get_tls_files() - self.assertIsNone(key) - self.assertIsNone(ca) - self.assertIsNone(certificate) - - # Test with TLS files available. 
- self.charm.set_secret(SCOPE, "key", "test-key") - self.charm.set_secret(SCOPE, "ca", "test-ca") - self.charm.set_secret(SCOPE, "cert", "test-cert") - key, ca, certificate = self.charm.tls.get_tls_files() - self.assertEqual(key, "test-key") - self.assertEqual(ca, "test-ca") - self.assertEqual(certificate, "test-cert") +@patch_network_get(private_address="1.1.1.1") +def test_get_sans(harness): + sans = harness.charm.tls._get_sans() + tc.assertEqual( + sans, + { + "sans_ip": ["1.1.1.1"], + "sans_dns": [ + "postgresql-k8s-0", + "postgresql-k8s-0.postgresql-k8s-endpoints", + socket.getfqdn(), + "1.1.1.1", + "postgresql-k8s-primary.None.svc.cluster.local", + "postgresql-k8s-replicas.None.svc.cluster.local", + ], + }, + ) + + +def test_get_tls_files(harness): + # Test with no TLS files available. + key, ca, certificate = harness.charm.tls.get_tls_files() + tc.assertIsNone(key) + tc.assertIsNone(ca) + tc.assertIsNone(certificate) + + # Test with TLS files available. + harness.charm.set_secret(SCOPE, "key", "test-key") + harness.charm.set_secret(SCOPE, "ca", "test-ca") + harness.charm.set_secret(SCOPE, "cert", "test-cert") + key, ca, certificate = harness.charm.tls.get_tls_files() + tc.assertEqual(key, "test-key") + tc.assertEqual(ca, "test-ca") + tc.assertEqual(certificate, "test-cert") diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index f1eeb9285e..9d90af81fc 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -1,8 +1,9 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. -import unittest +from unittest import TestCase from unittest.mock import MagicMock, PropertyMock, call, patch +import pytest import tenacity from charms.data_platform_libs.v0.upgrade import ( ClusterNotReadyError, @@ -15,56 +16,63 @@ from patroni import SwitchoverFailedError from tests.unit.helpers import _FakeApiError +# used for assert functions +tc = TestCase() -class TestUpgrade(unittest.TestCase): - """Test the upgrade class.""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) - def setUp(self): +@pytest.fixture(autouse=True) +def harness(): + with patch("charm.KubernetesServicePatch", lambda x, y: None): """Set up the test.""" - self.patcher = patch("lightkube.core.client.GenericSyncClient") - self.patcher.start() - self.harness = Harness(PostgresqlOperatorCharm) - self.harness.begin() - self.upgrade_relation_id = self.harness.add_relation("upgrade", "postgresql-k8s") - self.peer_relation_id = self.harness.add_relation("database-peers", "postgresql-k8s") - for rel_id in (self.upgrade_relation_id, self.peer_relation_id): - self.harness.add_relation_unit(rel_id, "postgresql-k8s/1") - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"} + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() + harness = Harness(PostgresqlOperatorCharm) + harness.begin() + upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s") + peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s") + for rel_id in (upgrade_relation_id, peer_relation_id): + harness.add_relation_unit(rel_id, "postgresql-k8s/1") + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"} ) - self.charm = self.harness.charm - - def test_is_no_sync_member(self): - # Test when there is no list of sync-standbys in the relation data. 
- self.assertFalse(self.charm.upgrade.is_no_sync_member) - - # Test when the current unit is not part of the list of sync-standbys - # from the relation data. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, - self.charm.app.name, - {"sync-standbys": '["postgresql-k8s/1", "postgresql-k8s/2"]'}, - ) - self.assertTrue(self.charm.upgrade.is_no_sync_member) - - # Test when the current unit is part of the list of sync-standbys from the relation data. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, - self.charm.app.name, - { - "sync-standbys": f'["{self.charm.unit.name}", "postgresql-k8s/1", "postgresql-k8s/2"]' - }, - ) - self.assertFalse(self.charm.upgrade.is_no_sync_member) + yield harness + harness.cleanup() + + +def test_is_no_sync_member(harness): + # Test when there is no list of sync-standbys in the relation data. + tc.assertFalse(harness.charm.upgrade.is_no_sync_member) + upgrade_relation_id = harness.model.get_relation("upgrade").id + + # Test when the current unit is not part of the list of sync-standbys + # from the relation data. + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, + harness.charm.app.name, + {"sync-standbys": '["postgresql-k8s/1", "postgresql-k8s/2"]'}, + ) + tc.assertTrue(harness.charm.upgrade.is_no_sync_member) + + # Test when the current unit is part of the list of sync-standbys from the relation data. + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, + harness.charm.app.name, + { + "sync-standbys": f'["{harness.charm.unit.name}", "postgresql-k8s/1", "postgresql-k8s/2"]' + }, + ) + tc.assertFalse(harness.charm.upgrade.is_no_sync_member) - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("upgrade.logger.info") - def test_log_rollback(self, mock_logging, _update_config): - self.charm.upgrade.log_rollback_instructions() + +def test_log_rollback(harness): + with ( + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("upgrade.logger.info") as mock_logging, + ): + harness.charm.upgrade.log_rollback_instructions() calls = [ call( "Run `juju refresh --revision postgresql-k8s` to initiate the rollback" @@ -75,38 +83,40 @@ def test_log_rollback(self, mock_logging, _update_config): ] mock_logging.assert_has_calls(calls) - @patch("charms.data_platform_libs.v0.upgrade.DataUpgrade.set_unit_failed") - @patch("charms.data_platform_libs.v0.upgrade.DataUpgrade.set_unit_completed") - @patch("charm.Patroni.is_replication_healthy", new_callable=PropertyMock) - @patch("charm.Patroni.cluster_members", new_callable=PropertyMock) - @patch("upgrade.wait_fixed", return_value=tenacity.wait_fixed(0)) - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - def test_on_postgresql_pebble_ready( - self, - _member_started, - _, - _cluster_members, - _is_replication_healthy, - _set_unit_completed, - _set_unit_failed, + +def test_on_postgresql_pebble_ready(harness): + with ( + patch( + "charms.data_platform_libs.v0.upgrade.DataUpgrade.set_unit_failed" + ) as _set_unit_failed, + patch( + "charms.data_platform_libs.v0.upgrade.DataUpgrade.set_unit_completed" + ) as _set_unit_completed, + patch( + "charm.Patroni.is_replication_healthy", new_callable=PropertyMock + ) as _is_replication_healthy, + patch("charm.Patroni.cluster_members", new_callable=PropertyMock) as _cluster_members, + patch("upgrade.wait_fixed", return_value=tenacity.wait_fixed(0)), + 
patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, ): # Set some side effects to test multiple situations. _member_started.side_effect = [False, True, True, True] + upgrade_relation_id = harness.model.get_relation("upgrade").id # Test when the unit status is different from "upgrading". mock_event = MagicMock() - self.charm.upgrade._on_postgresql_pebble_ready(mock_event) + harness.charm.upgrade._on_postgresql_pebble_ready(mock_event) _member_started.assert_not_called() mock_event.defer.assert_not_called() _set_unit_completed.assert_not_called() _set_unit_failed.assert_not_called() # Test when the unit status is equal to "upgrading", but the member hasn't started yet. - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, self.charm.unit.name, {"state": "upgrading"} + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, harness.charm.unit.name, {"state": "upgrading"} ) - self.charm.upgrade._on_postgresql_pebble_ready(mock_event) + harness.charm.upgrade._on_postgresql_pebble_ready(mock_event) _member_started.assert_called_once() mock_event.defer.assert_called_once() _set_unit_completed.assert_not_called() @@ -117,7 +127,7 @@ def test_on_postgresql_pebble_ready( _member_started.reset_mock() mock_event.defer.reset_mock() _cluster_members.return_value = ["postgresql-k8s-1"] - self.charm.upgrade._on_postgresql_pebble_ready(mock_event) + harness.charm.upgrade._on_postgresql_pebble_ready(mock_event) _member_started.assert_called_once() mock_event.defer.assert_not_called() _set_unit_completed.assert_not_called() @@ -128,11 +138,11 @@ def test_on_postgresql_pebble_ready( _set_unit_failed.reset_mock() mock_event.defer.reset_mock() _cluster_members.return_value = [ - self.charm.unit.name.replace("/", "-"), + harness.charm.unit.name.replace("/", "-"), "postgresql-k8s-1", ] _is_replication_healthy.return_value = False - self.charm.upgrade._on_postgresql_pebble_ready(mock_event) + harness.charm.upgrade._on_postgresql_pebble_ready(mock_event) mock_event.defer.assert_not_called() _set_unit_completed.assert_not_called() _set_unit_failed.assert_called_once() @@ -142,44 +152,44 @@ def test_on_postgresql_pebble_ready( _set_unit_failed.reset_mock() mock_event.defer.reset_mock() _is_replication_healthy.return_value = True - self.charm.upgrade._on_postgresql_pebble_ready(mock_event) + harness.charm.upgrade._on_postgresql_pebble_ready(mock_event) _member_started.assert_called_once() mock_event.defer.assert_not_called() _set_unit_completed.assert_called_once() _set_unit_failed.assert_not_called() - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.Patroni.member_started", new_callable=PropertyMock) - def test_on_upgrade_changed(self, _member_started, _update_config): + +def test_on_upgrade_changed(harness): + with ( + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + ): _member_started.return_value = False - relation = self.harness.model.get_relation("upgrade") - self.charm.on.upgrade_relation_changed.emit(relation) + relation = harness.model.get_relation("upgrade") + harness.charm.on.upgrade_relation_changed.emit(relation) _update_config.assert_not_called() _member_started.return_value = True - self.charm.on.upgrade_relation_changed.emit(relation) + harness.charm.on.upgrade_relation_changed.emit(relation) _update_config.assert_called_once() - 
@patch("charm.PostgreSQLUpgrade._set_rolling_update_partition") - @patch("charm.PostgreSQLUpgrade._set_list_of_sync_standbys") - @patch("charm.Patroni.switchover") - @patch("charm.Patroni.get_sync_standby_names") - @patch("charm.PostgresqlOperatorCharm.update_config") - @patch("charm.Patroni.get_primary") - @patch("charm.Patroni.is_creating_backup", new_callable=PropertyMock) - @patch("charm.Patroni.are_all_members_ready") - def test_pre_upgrade_check( - self, - _are_all_members_ready, - _is_creating_backup, - _get_primary, - _update_config, - _get_sync_standby_names, - _switchover, - _set_list_of_sync_standbys, - _set_rolling_update_partition, + +def test_pre_upgrade_check(harness): + with ( + patch( + "charm.PostgreSQLUpgrade._set_rolling_update_partition" + ) as _set_rolling_update_partition, + patch("charm.PostgreSQLUpgrade._set_list_of_sync_standbys") as _set_list_of_sync_standbys, + patch("charm.Patroni.switchover") as _switchover, + patch("charm.Patroni.get_sync_standby_names") as _get_sync_standby_names, + patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch("charm.Patroni.get_primary") as _get_primary, + patch( + "charm.Patroni.is_creating_backup", new_callable=PropertyMock + ) as _is_creating_backup, + patch("charm.Patroni.are_all_members_ready") as _are_all_members_ready, ): - self.harness.set_leader(True) + harness.set_leader(True) # Set some side effects to test multiple situations. _are_all_members_ready.side_effect = [False, True, True, True, True, True, True] @@ -187,50 +197,54 @@ def test_pre_upgrade_check( _switchover.side_effect = [None, SwitchoverFailedError] # Test when not all members are ready. - with self.assertRaises(ClusterNotReadyError): - self.charm.upgrade.pre_upgrade_check() + with tc.assertRaises(ClusterNotReadyError): + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() # Test when a backup is being created. - with self.assertRaises(ClusterNotReadyError): - self.charm.upgrade.pre_upgrade_check() + with tc.assertRaises(ClusterNotReadyError): + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() # Test when the primary is already the first unit. - unit_zero_name = f"{self.charm.app.name}/0" + unit_zero_name = f"{harness.charm.app.name}/0" _get_primary.return_value = unit_zero_name - self.charm.upgrade.pre_upgrade_check() + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() - _set_rolling_update_partition.assert_called_once_with(self.charm.app.planned_units() - 1) + _set_rolling_update_partition.assert_called_once_with( + harness.charm.app.planned_units() - 1 + ) # Test when there are no sync-standbys. _set_rolling_update_partition.reset_mock() - _get_primary.return_value = f"{self.charm.app.name}/1" + _get_primary.return_value = f"{harness.charm.app.name}/1" _get_sync_standby_names.return_value = [] - with self.assertRaises(ClusterNotReadyError): - self.charm.upgrade.pre_upgrade_check() + with tc.assertRaises(ClusterNotReadyError): + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_not_called() _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() # Test when the first unit is a sync-standby. 
_set_rolling_update_partition.reset_mock() - _get_sync_standby_names.return_value = [unit_zero_name, f"{self.charm.app.name}/2"] - self.charm.upgrade.pre_upgrade_check() + _get_sync_standby_names.return_value = [unit_zero_name, f"{harness.charm.app.name}/2"] + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_called_once_with(unit_zero_name) _set_list_of_sync_standbys.assert_not_called() - _set_rolling_update_partition.assert_called_once_with(self.charm.app.planned_units() - 1) + _set_rolling_update_partition.assert_called_once_with( + harness.charm.app.planned_units() - 1 + ) # Test when the switchover fails. _switchover.reset_mock() _set_rolling_update_partition.reset_mock() - with self.assertRaises(ClusterNotReadyError): - self.charm.upgrade.pre_upgrade_check() + with tc.assertRaises(ClusterNotReadyError): + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_called_once_with(unit_zero_name) _set_list_of_sync_standbys.assert_not_called() _set_rolling_update_partition.assert_not_called() @@ -238,15 +252,18 @@ def test_pre_upgrade_check( # Test when the first unit is neither the primary nor a sync-standby. _switchover.reset_mock() _set_rolling_update_partition.reset_mock() - _get_sync_standby_names.return_value = f'["{self.charm.app.name}/2"]' - with self.assertRaises(ClusterNotReadyError): - self.charm.upgrade.pre_upgrade_check() + _get_sync_standby_names.return_value = f'["{harness.charm.app.name}/2"]' + with tc.assertRaises(ClusterNotReadyError): + harness.charm.upgrade.pre_upgrade_check() _switchover.assert_not_called() _set_list_of_sync_standbys.assert_called_once() _set_rolling_update_partition.assert_not_called() - @patch("charm.Patroni.get_sync_standby_names") - def test_set_list_of_sync_standbys(self, _get_sync_standby_names): + +def test_set_list_of_sync_standbys(harness): + with patch("charm.Patroni.get_sync_standby_names") as _get_sync_standby_names: + upgrade_relation_id = harness.model.get_relation("upgrade").id + peer_relation_id = harness.model.get_relation("database-peers").id # Mock some return values. _get_sync_standby_names.side_effect = [ ["postgresql-k8s/1"], @@ -255,72 +272,67 @@ def test_set_list_of_sync_standbys(self, _get_sync_standby_names): ] # Test when the there are less than 3 units in the cluster. - self.charm.upgrade._set_list_of_sync_standbys() - self.assertNotIn( + harness.charm.upgrade._set_list_of_sync_standbys() + tc.assertNotIn( "sync-standbys", - self.harness.get_relation_data(self.upgrade_relation_id, self.charm.app), + harness.get_relation_data(upgrade_relation_id, harness.charm.app), ) # Test when the there are 3 units in the cluster. 
- for rel_id in (self.upgrade_relation_id, self.peer_relation_id): - self.harness.add_relation_unit(rel_id, "postgresql-k8s/2") - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, "postgresql-k8s/2", {"state": "idle"} + for rel_id in (upgrade_relation_id, peer_relation_id): + harness.add_relation_unit(rel_id, "postgresql-k8s/2") + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, "postgresql-k8s/2", {"state": "idle"} ) - self.charm.upgrade._set_list_of_sync_standbys() - self.assertEqual( - self.harness.get_relation_data(self.upgrade_relation_id, self.charm.app)[ - "sync-standbys" - ], + harness.charm.upgrade._set_list_of_sync_standbys() + tc.assertEqual( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], '["postgresql-k8s/0"]', ) # Test when the unit zero is already a sync-standby. - for rel_id in (self.upgrade_relation_id, self.peer_relation_id): - self.harness.add_relation_unit(rel_id, "postgresql-k8s/3") - with self.harness.hooks_disabled(): - self.harness.update_relation_data( - self.upgrade_relation_id, "postgresql-k8s/3", {"state": "idle"} + for rel_id in (upgrade_relation_id, peer_relation_id): + harness.add_relation_unit(rel_id, "postgresql-k8s/3") + with harness.hooks_disabled(): + harness.update_relation_data( + upgrade_relation_id, "postgresql-k8s/3", {"state": "idle"} ) - self.charm.upgrade._set_list_of_sync_standbys() - self.assertEqual( - self.harness.get_relation_data(self.upgrade_relation_id, self.charm.app)[ - "sync-standbys" - ], + harness.charm.upgrade._set_list_of_sync_standbys() + tc.assertEqual( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], '["postgresql-k8s/0", "postgresql-k8s/1"]', ) # Test when the unit zero is not a sync-standby yet. - self.charm.upgrade._set_list_of_sync_standbys() - self.assertEqual( - self.harness.get_relation_data(self.upgrade_relation_id, self.charm.app)[ - "sync-standbys" - ], + harness.charm.upgrade._set_list_of_sync_standbys() + tc.assertEqual( + harness.get_relation_data(upgrade_relation_id, harness.charm.app)["sync-standbys"], '["postgresql-k8s/1", "postgresql-k8s/0"]', ) - @patch("upgrade.Client") - def test_set_rolling_update_partition(self, _client): + +def test_set_rolling_update_partition(harness): + with patch("upgrade.Client") as _client: # Test the successful operation. - self.charm.upgrade._set_rolling_update_partition(2) + harness.charm.upgrade._set_rolling_update_partition(2) _client.return_value.patch.assert_called_once_with( StatefulSet, - name=self.charm.app.name, - namespace=self.charm.model.name, + name=harness.charm.app.name, + namespace=harness.charm.model.name, obj={"spec": {"updateStrategy": {"rollingUpdate": {"partition": 2}}}}, ) # Test an operation that failed due to lack of Juju's trust flag. _client.return_value.patch.reset_mock() _client.return_value.patch.side_effect = _FakeApiError(403) - with self.assertRaises(KubernetesClientError) as exception: - self.charm.upgrade._set_rolling_update_partition(2) - self.assertEqual(exception.exception.cause, "`juju trust` needed") + with tc.assertRaises(KubernetesClientError) as exception: + harness.charm.upgrade._set_rolling_update_partition(2) + tc.assertEqual(exception.exception.cause, "`juju trust` needed") # Test an operation that failed due to some other reason. 
_client.return_value.patch.reset_mock() _client.return_value.patch.side_effect = _FakeApiError - with self.assertRaises(KubernetesClientError) as exception: - self.charm.upgrade._set_rolling_update_partition(2) - self.assertEqual(exception.exception.cause, "broken") + with tc.assertRaises(KubernetesClientError) as exception: + harness.charm.upgrade._set_rolling_update_partition(2) + tc.assertEqual(exception.exception.cause, "broken") diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index de4812ce5d..1080e659e6 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -2,19 +2,21 @@ # See LICENSE file for licensing details. import re -import unittest +from unittest import TestCase from utils import new_password +# used for assert functions +tc = TestCase() -class TestUtils(unittest.TestCase): - def test_new_password(self): - # Test the password generation twice in order to check if we get different passwords and - # that they meet the required criteria. - first_password = new_password() - self.assertEqual(len(first_password), 16) - self.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", first_password)) - second_password = new_password() - self.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", second_password)) - self.assertNotEqual(second_password, first_password) +def test_new_password(): + # Test the password generation twice in order to check if we get different passwords and + # that they meet the required criteria. + first_password = new_password() + tc.assertEqual(len(first_password), 16) + tc.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", first_password)) + + second_password = new_password() + tc.assertIsNotNone(re.fullmatch("[a-zA-Z0-9\b]{16}$", second_password)) + tc.assertNotEqual(second_password, first_password)
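
Editor's note (illustrative only, not part of the patch above): the diff applies the same conversion pattern in every test module — a module-level `TestCase()` instance kept only for its assert helpers, an autouse pytest fixture replacing `setUp()`/`addCleanup()`, and `patch()` used as a context manager instead of a decorator. The minimal, generic sketch below shows that pattern in isolation; `FakeCharm` and `fake_version` are hypothetical stand-ins, not names from this repository.

```python
# Minimal sketch of the unittest-to-pytest conversion pattern used in this diff.
# FakeCharm / fake_version are hypothetical; only stdlib + pytest are required.
from unittest import TestCase
from unittest.mock import patch

import pytest

# used for assert functions (same convention as the converted tests)
tc = TestCase()


class FakeCharm:
    """Hypothetical object standing in for the charm under test."""

    def version(self) -> str:
        return "14"


@pytest.fixture(autouse=True)
def charm():
    # Replaces unittest's setUp()/addCleanup(): build the object, yield it,
    # then run any teardown after the yield (harness.cleanup() in the real tests).
    charm = FakeCharm()
    yield charm


def test_version(charm):
    # Decorator-style @patch(...) becomes a with-block, keeping the mock local
    # to the test body and giving it a readable name.
    with patch.object(FakeCharm, "version", return_value="fake_version") as _version:
        tc.assertEqual(charm.version(), "fake_version")
        _version.assert_called_once()
```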