
Commit c5c916d

Lock file maintenance Python dependencies (#644)
* Lock file maintenance Python dependencies
* Fix linting

---------

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Dragomir Penev <dragomir.penev@canonical.com>
1 parent 4286b57 commit c5c916d

19 files changed: +452 -459 lines

poetry.lock

Lines changed: 276 additions & 285 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 8 additions & 8 deletions
```diff
@@ -7,15 +7,15 @@ package-mode = false
 [tool.poetry.dependencies]
 python = "^3.10"
 ops = "^2.17.0"
-cryptography = "^43.0.1"
-boto3 = "^1.35.38"
+cryptography = "^43.0.3"
+boto3 = "^1.35.49"
 pgconnstr = "^1.0.1"
 requests = "^2.32.3"
 tenacity = "^9.0.0"
-psycopg2 = "^2.9.9"
-cosl = "^0.0.40"
+psycopg2 = "^2.9.10"
+cosl = "^0.0.42"
 pydantic = "^1.10.18"
-poetry-core = "^1.9.0"
+poetry-core = "^1.9.1"
 pyOpenSSL = "^24.2.1"
 jinja2 = "^3.1.4"
 
@@ -38,7 +38,7 @@ opentelemetry-exporter-otlp-proto-http = "1.21.0"
 optional = true
 
 [tool.poetry.group.format.dependencies]
-ruff = "^0.6.9"
+ruff = "^0.7.1"
 
 [tool.poetry.group.lint]
 optional = true
@@ -50,7 +50,7 @@ codespell = "^2.3.0"
 optional = true
 
 [tool.poetry.group.unit.dependencies]
-coverage = {extras = ["toml"], version = "^7.6.2"}
+coverage = {extras = ["toml"], version = "^7.6.4"}
 pytest = "^8.3.3"
 pytest-asyncio = "*"
 parameterized = "^0.9.0"
@@ -71,7 +71,7 @@ boto3 = "*"
 tenacity = "*"
 landscape-api-py3 = "^0.9.0"
 mailmanclient = "^3.3.5"
-psycopg2-binary = "^2.9.9"
+psycopg2-binary = "^2.9.10"
 allure-pytest = "^2.13.5"
 allure-pytest-collection-report = {git = "https://github.com/canonical/data-platform-workflows", tag = "v23.0.4", subdirectory = "python/pytest_plugins/allure_pytest_collection_report"}
```
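The ruff bump from `^0.6.9` to `^0.7.1` appears to drive the formatting churn in the source diffs below: the newer formatter prefers double-quoted f-strings (flipping nested subscript keys to single quotes) and moves long assert messages into parentheses instead of wrapping the condition. A minimal sketch of both rewrites, with hypothetical values not taken from this repo:

```python
# Hypothetical data; illustrates the two mechanical rewrites in this commit.
backup = {"lsn": {"start": "0/4000028", "stop": "0/6000050"}}

# Old style: single-quoted f-string so the double-quoted keys don't clash.
old = f'{backup["lsn"]["start"]} / {backup["lsn"]["stop"]}'
# New style: double-quoted f-string, nested keys flipped to single quotes.
new = f"{backup['lsn']['start']} / {backup['lsn']['stop']}"
assert old == new  # the rewrite is purely cosmetic

# Assert messages move into parentheses, keeping the condition on one line:
count, expected = 3, 3
assert count == expected, (
    f"writes missed: got {count}, expected {expected}"
)
```

Both rewrites are behavior-preserving; the one genuine bug fix in this commit is noted under tests/integration/ha_tests/helpers.py below.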

src/backups.py

Lines changed: 8 additions & 8 deletions
```diff
@@ -202,7 +202,7 @@ def can_use_s3_repository(self) -> tuple[bool, str | None]:
             return False, ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE
 
         return_code, system_identifier_from_instance, error = self._execute_command([
-            f'/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split(".")[0]}/bin/pg_controldata',
+            f"/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split('.')[0]}/bin/pg_controldata",
             POSTGRESQL_DATA_PATH,
         ])
         if return_code != 0:
@@ -244,7 +244,7 @@ def _construct_endpoint(self, s3_parameters: dict) -> str:
 
         # Use the built endpoint if it is an AWS endpoint.
         if endpoint_data and endpoint.endswith(endpoint_data["dnsSuffix"]):
-            endpoint = f'{endpoint.split("://")[0]}://{endpoint_data["hostname"]}'
+            endpoint = f"{endpoint.split('://')[0]}://{endpoint_data['hostname']}"
 
         return endpoint
 
@@ -392,7 +392,7 @@ def _generate_backup_list_output(self) -> str:
             backup_reference = "None"
             if backup["reference"]:
                 backup_reference, _ = self._parse_backup_id(backup["reference"][-1])
-            lsn_start_stop = f'{backup["lsn"]["start"]} / {backup["lsn"]["stop"]}'
+            lsn_start_stop = f"{backup['lsn']['start']} / {backup['lsn']['stop']}"
             time_start, time_stop = (
                 datetime.strftime(
                     datetime.fromtimestamp(stamp, timezone.utc), "%Y-%m-%dT%H:%M:%SZ"
@@ -404,7 +404,7 @@ def _generate_backup_list_output(self) -> str:
                 if backup["archive"] and backup["archive"]["start"]
                 else ""
             )
-            backup_path = f'/{self.stanza_name}/{backup["label"]}'
+            backup_path = f"/{self.stanza_name}/{backup['label']}"
             error = backup["error"]
             backup_status = "finished"
             if error:
@@ -1121,16 +1121,16 @@ def _generate_fake_backup_id(self, backup_type: str) -> str:
 
             if last_full_backup is None:
                 raise TypeError("Differential backup requested but no previous full backup")
-            return f'{last_full_backup}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SD")}'
+            return f"{last_full_backup}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SD')}"
         if backup_type == "incremental":
             backups = self._list_backups(show_failed=False, parse=False).keys()
             if not backups:
                 raise TypeError("Incremental backup requested but no previous successful backup")
-            return f'{backups[-1]}_{datetime.strftime(datetime.now(), "%Y%m%d-%H%M%SI")}'
+            return f"{backups[-1]}_{datetime.strftime(datetime.now(), '%Y%m%d-%H%M%SI')}"
 
     def _fetch_backup_from_id(self, backup_id: str) -> str:
         """Fetches backup's pgbackrest label from backup id."""
-        timestamp = f'{datetime.strftime(datetime.strptime(backup_id, "%Y-%m-%dT%H:%M:%SZ"), "%Y%m%d-%H%M%S")}'
+        timestamp = f"{datetime.strftime(datetime.strptime(backup_id, '%Y-%m-%dT%H:%M:%SZ'), '%Y%m%d-%H%M%S')}"
         backups = self._list_backups(show_failed=False, parse=False).keys()
         for label in backups:
             if timestamp in label:
@@ -1285,7 +1285,7 @@ def _retrieve_s3_parameters(self) -> tuple[dict, list[str]]:
         # like Ceph Object Gateway (radosgw).
         s3_parameters["endpoint"] = s3_parameters["endpoint"].rstrip("/")
         s3_parameters["path"] = (
-            f'/{s3_parameters["path"].strip("/")}'  # The slash in the beginning is required by pgBackRest.
+            f"/{s3_parameters['path'].strip('/')}"  # The slash in the beginning is required by pgBackRest.
         )
         s3_parameters["bucket"] = s3_parameters["bucket"].strip("/")
```
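Both `pg_controldata` hunks (the one above, and the matching one in src/relations/async_replication.py below) build the binary path from the major component of the Patroni-reported PostgreSQL version. A sketch of that construction with a hypothetical version string:

```python
# Hypothetical version value; at runtime it comes from Patroni.
postgresql_version = "14.12"
major = postgresql_version.split(".")[0]  # "14"
pg_controldata = (
    f"/snap/charmed-postgresql/current/usr/lib/postgresql/{major}/bin/pg_controldata"
)
print(pg_controldata)
# /snap/charmed-postgresql/current/usr/lib/postgresql/14/bin/pg_controldata
```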

src/relations/async_replication.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -339,7 +339,7 @@ def result():
             # Input is hardcoded
             process = run(  # noqa: S603
                 [
-                    f'/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split(".")[0]}/bin/pg_controldata',
+                    f"/snap/charmed-postgresql/current/usr/lib/postgresql/{self.charm._patroni.get_postgresql_version().split('.')[0]}/bin/pg_controldata",
                     POSTGRESQL_DATA_PATH,
                 ],
                 capture_output=True,
@@ -645,7 +645,7 @@ def _primary_cluster_endpoint(self) -> str:
     def _re_emit_async_relation_changed_event(self) -> None:
         """Re-emit the async relation changed event."""
         relation = self._relation
-        getattr(self.charm.on, f'{relation.name.replace("-", "_")}_relation_changed').emit(
+        getattr(self.charm.on, f"{relation.name.replace('-', '_')}_relation_changed").emit(
             relation,
             app=relation.app,
             unit=next(unit for unit in relation.units if unit.app == relation.app),
```

tests/integration/ha_tests/helpers.py

Lines changed: 17 additions & 17 deletions
```diff
@@ -76,7 +76,7 @@ async def are_all_db_processes_down(ops_test: OpsTest, process: str, signal: str
 
         # If something was returned, there is a running process.
         if len(processes) > 0:
-            logger.info("Unit {unit.name} not yet down")
+            logger.info(f"Unit {unit.name} not yet down")
             # Try to rekill the unit
             await send_signal_to_process(ops_test, unit.name, process, signal)
             raise ProcessRunningError
@@ -108,9 +108,9 @@ async def are_writes_increasing(
             use_ip_from_inside=use_ip_from_inside,
             extra_model=extra_model,
         )
-        assert (
-            more_writes[member] > count
-        ), f"{member}: writes not continuing to DB (current writes: {more_writes[member]} - previous writes: {count})"
+        assert more_writes[member] > count, (
+            f"{member}: writes not continuing to DB (current writes: {more_writes[member]} - previous writes: {count})"
+        )
 
 
 async def app_name(
@@ -214,9 +214,9 @@ async def is_cluster_updated(
 ) -> None:
     # Verify that the old primary is now a replica.
     logger.info("checking that the former primary is now a replica")
-    assert await is_replica(
-        ops_test, primary_name, use_ip_from_inside
-    ), "there are more than one primary in the cluster."
+    assert await is_replica(ops_test, primary_name, use_ip_from_inside), (
+        "there are more than one primary in the cluster."
+    )
 
     # Verify that all units are part of the same cluster.
     logger.info("checking that all units are part of the same cluster")
@@ -255,9 +255,9 @@ async def check_writes(
         print(
             f"member: {member}, count: {count}, max_number_written: {max_number_written[member]}, total_expected_writes: {total_expected_writes}"
         )
-        assert (
-            count == max_number_written[member]
-        ), f"{member}: writes to the db were missed: count of actual writes different from the max number written."
+        assert count == max_number_written[member], (
+            f"{member}: writes to the db were missed: count of actual writes different from the max number written."
+        )
         assert total_expected_writes == count, f"{member}: writes to the db were missed."
     return total_expected_writes
 
@@ -309,7 +309,7 @@ def count_writes_on_members(members, password, down_ips) -> tuple[dict[str, int]
             f" host='{host}' password='{password}' connect_timeout=10"
         )
 
-        member_name = f'{member["model"]}.{member["name"]}'
+        member_name = f"{member['model']}.{member['name']}"
         connection = None
         try:
             with (
@@ -378,9 +378,9 @@ async def fetch_cluster_members(ops_test: OpsTest, use_ip_from_inside: bool = Fa
         if len(member_ips) > 0:
             # If the list of members IPs was already fetched, also compare the
             # list provided by other members.
-            assert member_ips == {
-                member["host"] for member in cluster_info.json()["members"]
-            }, "members report different lists of cluster members."
+            assert member_ips == {member["host"] for member in cluster_info.json()["members"]}, (
+                "members report different lists of cluster members."
+            )
         else:
             member_ips = {member["host"] for member in cluster_info.json()["members"]}
     return member_ips
@@ -929,9 +929,9 @@ async def add_unit_with_storage(ops_test, app, storage):
     assert return_code == 0, "Failed to add unit with storage"
    async with ops_test.fast_forward():
        await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=2000)
-        assert (
-            len(ops_test.model.applications[app].units) == expected_units
-        ), "New unit not added to model"
+        assert len(ops_test.model.applications[app].units) == expected_units, (
+            "New unit not added to model"
+        )
 
     # verify storage attached
     curr_units = [unit.name for unit in ops_test.model.applications[app].units]
```
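One change in this file is a real fix rather than reformatting: in the first hunk, the log call in `are_all_db_processes_down` was missing its `f` prefix, so `{unit.name}` was logged literally instead of being interpolated. A self-contained illustration, with a hypothetical unit name:

```python
unit_name = "postgresql/0"  # hypothetical unit name

print("Unit {unit_name} not yet down")   # -> Unit {unit_name} not yet down
print(f"Unit {unit_name} not yet down")  # -> Unit postgresql/0 not yet down
```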

tests/integration/ha_tests/test_async_replication.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -371,7 +371,7 @@ async def test_promote_standby(
     primary = await get_primary(ops_test, any_unit)
     address = get_unit_address(ops_test, primary)
     password = await get_password(ops_test, primary)
-    database_name = f'{APPLICATION_NAME.replace("-", "_")}_database'
+    database_name = f"{APPLICATION_NAME.replace('-', '_')}_database"
     connection = None
     try:
         connection = psycopg2.connect(
```

tests/integration/ha_tests/test_replication.py

Lines changed: 3 additions & 3 deletions
```diff
@@ -149,8 +149,8 @@ async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous
                 "SELECT EXISTS (SELECT FROM information_schema.tables"
                 " WHERE table_schema = 'public' AND table_name = 'continuous_writes');"
             )
-            assert not cursor.fetchone()[
-                0
-            ], "table 'continuous_writes' was replicated to the second cluster"
+            assert not cursor.fetchone()[0], (
+                "table 'continuous_writes' was replicated to the second cluster"
+            )
         finally:
             connection.close()
```

tests/integration/ha_tests/test_restore_cluster.py

Lines changed: 6 additions & 6 deletions
```diff
@@ -97,9 +97,9 @@ async def test_cluster_restore(ops_test):
     logger.info("Upscaling the second cluster with the old data")
     for storage in storages:
         unit = await add_unit_with_storage(ops_test, SECOND_APPLICATION, storage)
-        assert await reused_full_cluster_recovery_storage(
-            ops_test, unit.name
-        ), "attached storage not properly re-used by Postgresql."
+        assert await reused_full_cluster_recovery_storage(ops_test, unit.name), (
+            "attached storage not properly re-used by Postgresql."
+        )
 
     primary = await get_primary(
         ops_test, ops_test.model.applications[SECOND_APPLICATION].units[0].name
@@ -111,9 +111,9 @@ async def test_cluster_restore(ops_test):
         "SELECT EXISTS (SELECT FROM information_schema.tables"
         " WHERE table_schema = 'public' AND table_name = 'restore_table_1');"
     )
-    assert cursor.fetchone()[
-        0
-    ], "data wasn't correctly restored: table 'restore_table_1' doesn't exist"
+    assert cursor.fetchone()[0], (
+        "data wasn't correctly restored: table 'restore_table_1' doesn't exist"
+    )
     connection.close()
 
     # check that there is only one primary
```
