From 07d0c45a8c04fd0482ac90f291eced799f8c9756 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 7 Mar 2025 19:06:59 +0200 Subject: [PATCH 1/4] Bump libs --- lib/charms/grafana_agent/v0/cos_agent.py | 9 ++++++++- lib/charms/rolling_ops/v0/rollingops.py | 8 ++++---- lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py | 7 +++++-- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/charms/grafana_agent/v0/cos_agent.py b/lib/charms/grafana_agent/v0/cos_agent.py index f1344c06ac..b18c271342 100644 --- a/lib/charms/grafana_agent/v0/cos_agent.py +++ b/lib/charms/grafana_agent/v0/cos_agent.py @@ -254,7 +254,7 @@ class _MetricsEndpointDict(TypedDict): LIBID = "dc15fa84cef84ce58155fb84f6c6213a" LIBAPI = 0 -LIBPATCH = 19 +LIBPATCH = 20 PYDEPS = ["cosl >= 0.0.50", "pydantic"] @@ -758,6 +758,13 @@ def _dashboards(self) -> List[str]: # because there is currently no other way to communicate the dashboard path separately. # https://github.com/canonical/grafana-k8s-operator/pull/363 dashboard["uid"] = DashboardPath40UID.generate(self._charm.meta.name, rel_path) + + # Add tags + tags: List[str] = dashboard.get("tags", []) + if not any(tag.startswith("charm: ") for tag in tags): + tags.append(f"charm: {self._charm.meta.name}") + dashboard["tags"] = tags + dashboards.append(LZMABase64.compress(json.dumps(dashboard))) return dashboards diff --git a/lib/charms/rolling_ops/v0/rollingops.py b/lib/charms/rolling_ops/v0/rollingops.py index 57aa9bf352..13b51a3051 100644 --- a/lib/charms/rolling_ops/v0/rollingops.py +++ b/lib/charms/rolling_ops/v0/rollingops.py @@ -63,13 +63,14 @@ def _on_trigger_restart(self, event): juju run-action some-charm/0 some-charm/1 <... some-charm/n> restart ``` -Note that all units that plan to restart must receive the action and emit the aquire +Note that all units that plan to restart must receive the action and emit the acquire event. Any units that do not run their acquire handler will be left out of the rolling restart. 
(An operator might take advantage of this fact to recover from a failed rolling operation without restarting workloads that were able to successfully restart -- simply omit the successful units from a subsequent run-action call.) """ + import logging from enum import Enum from typing import AnyStr, Callable, Optional @@ -88,7 +89,7 @@ def _on_trigger_restart(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 class LockNoRelationError(Exception): @@ -149,7 +150,6 @@ class Lock: """ def __init__(self, manager, unit=None): - self.relation = manager.model.relations[manager.name][0] if not self.relation: # TODO: defer caller in this case (probably just fired too soon). @@ -246,7 +246,7 @@ def __init__(self, manager): # Gather all the units. relation = manager.model.relations[manager.name][0] - units = [unit for unit in relation.units] + units = list(relation.units) # Plus our unit ... 
units.append(manager.model.unit) diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index ebf80ede2e..e2208f756f 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -325,7 +325,10 @@ def _remove_stale_otel_sdk_packages(): SpanExportResult, ) from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter -from opentelemetry.trace import INVALID_SPAN, Tracer +from opentelemetry.trace import ( + INVALID_SPAN, + Tracer, +) from opentelemetry.trace import get_current_span as otlp_get_current_span from opentelemetry.trace import ( get_tracer, @@ -345,7 +348,7 @@ def _remove_stale_otel_sdk_packages(): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 6 +LIBPATCH = 7 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] From 523e7797e3e58189de105499bf736e0b7dc10e68 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Fri, 7 Mar 2025 19:23:47 +0200 Subject: [PATCH 2/4] Skip backup tests without creds --- tests/integration/conftest.py | 83 ++++++++++++++++++++++ tests/integration/test_backups_aws.py | 59 ++------------- tests/integration/test_backups_gcp.py | 70 +++--------------- tests/integration/test_backups_pitr_aws.py | 60 ++-------------- tests/integration/test_backups_pitr_gcp.py | 60 ++-------------- 5 files changed, 108 insertions(+), 224 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index bdce9d8e13..e644af55cd 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,8 +1,20 @@ # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. +import logging +import os +import uuid + +import boto3 import pytest +from pytest_operator.plugin import OpsTest from . 
import architecture +from .helpers import construct_endpoint + +AWS = "AWS" +GCP = "GCP" + +logger = logging.getLogger(__name__) @pytest.fixture(scope="session") @@ -11,3 +23,74 @@ def charm(): # juju bundle files expect local charms to begin with `./` or `/` to distinguish them from # Charmhub charms. return f"./postgresql_ubuntu@22.04-{architecture.architecture}.charm" + + +def get_cloud_config(cloud: str) -> tuple[dict[str, str], dict[str, str]]: + # Define some configurations and credentials. + if cloud == AWS: + return { + "endpoint": "https://s3.amazonaws.com", + "bucket": "data-charms-testing", + "path": f"/postgresql-k8s/{uuid.uuid1()}", + "region": "us-east-1", + }, { + "access-key": os.environ["AWS_ACCESS_KEY"], + "secret-key": os.environ["AWS_SECRET_KEY"], + } + elif cloud == GCP: + return { + "endpoint": "https://storage.googleapis.com", + "bucket": "data-charms-testing", + "path": f"/postgresql-k8s/{uuid.uuid1()}", + "region": "", + }, { + "access-key": os.environ["GCP_ACCESS_KEY"], + "secret-key": os.environ["GCP_SECRET_KEY"], + } + + +def cleanup_cloud(config: dict[str, str], credentials: dict[str, str]) -> None: + # Delete the previously created objects. + logger.info("deleting the previously created backups") + session = boto3.session.Session( + aws_access_key_id=credentials["access-key"], + aws_secret_access_key=credentials["secret-key"], + region_name=config["region"], + ) + s3 = session.resource( + "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) + ) + bucket = s3.Bucket(config["bucket"]) + # GCS doesn't support batch delete operation, so delete the objects one by one. 
+ for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): + bucket_object.delete() + + +@pytest.fixture(scope="module") +async def aws_cloud_configs(ops_test: OpsTest) -> None: + if ( + not os.environ.get("AWS_ACCESS_KEY", "").strip() + or not os.environ.get("AWS_SECRET_KEY", "").strip() + ): + pytest.skip("AWS configs not set") + return + + config, credentials = get_cloud_config(AWS) + yield config, credentials + + cleanup_cloud(config, credentials) + + +@pytest.fixture(scope="module") +async def gcp_cloud_configs(ops_test: OpsTest) -> None: + if ( + not os.environ.get("GCP_ACCESS_KEY", "").strip() + or not os.environ.get("GCP_SECRET_KEY", "").strip() + ): + pytest.skip("GCP configs not set") + return + + config, credentials = get_cloud_config(GCP) + yield config, credentials + + cleanup_cloud(config, credentials) diff --git a/tests/integration/test_backups_aws.py b/tests/integration/test_backups_aws.py index 76343af0ac..49ed3545a2 100644 --- a/tests/integration/test_backups_aws.py +++ b/tests/integration/test_backups_aws.py @@ -2,19 +2,16 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. import logging -import os -import uuid -import boto3 import pytest as pytest from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential from . import architecture +from .conftest import AWS from .helpers import ( DATABASE_APP_NAME, backup_operations, - construct_endpoint, db_connect, get_password, get_primary, @@ -40,60 +37,12 @@ logger = logging.getLogger(__name__) -AWS = "AWS" -GCP = "GCP" - - -@pytest.fixture(scope="module") -async def cloud_configs() -> None: - # Define some configurations and credentials. 
- configs = { - AWS: { - "endpoint": "https://s3.amazonaws.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-vm/{uuid.uuid1()}", - "region": "us-east-1", - }, - GCP: { - "endpoint": "https://storage.googleapis.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-vm/{uuid.uuid1()}", - "region": "", - }, - } - credentials = { - AWS: { - "access-key": os.environ["AWS_ACCESS_KEY"], - "secret-key": os.environ["AWS_SECRET_KEY"], - }, - GCP: { - "access-key": os.environ["GCP_ACCESS_KEY"], - "secret-key": os.environ["GCP_SECRET_KEY"], - }, - } - yield configs, credentials - # Delete the previously created objects. - logger.info("deleting the previously created backups") - for cloud, config in configs.items(): - session = boto3.session.Session( - aws_access_key_id=credentials[cloud]["access-key"], - aws_secret_access_key=credentials[cloud]["secret-key"], - region_name=config["region"], - ) - s3 = session.resource( - "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) - ) - bucket = s3.Bucket(config["bucket"]) - # GCS doesn't support batch delete operation, so delete the objects one by one. 
- for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): - bucket_object.delete() - @pytest.mark.abort_on_fail -async def test_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None: +async def test_backup_aws(ops_test: OpsTest, aws_cloud_configs: tuple[dict, dict], charm) -> None: """Build and deploy two units of PostgreSQL in AWS, test backup and restore actions.""" - config = cloud_configs[0][AWS] - credentials = cloud_configs[1][AWS] + config = aws_cloud_configs[0] + credentials = aws_cloud_configs[1] await backup_operations( ops_test, diff --git a/tests/integration/test_backups_gcp.py b/tests/integration/test_backups_gcp.py index 63cb3617bd..d88fd894de 100644 --- a/tests/integration/test_backups_gcp.py +++ b/tests/integration/test_backups_gcp.py @@ -2,20 +2,18 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. import logging -import os import uuid -import boto3 import pytest as pytest from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential from . import architecture +from .conftest import GCP from .helpers import ( CHARM_BASE, DATABASE_APP_NAME, backup_operations, - construct_endpoint, db_connect, get_password, get_unit_address, @@ -39,60 +37,12 @@ logger = logging.getLogger(__name__) -AWS = "AWS" -GCP = "GCP" - - -@pytest.fixture(scope="module") -async def cloud_configs() -> None: - # Define some configurations and credentials. 
- configs = { - AWS: { - "endpoint": "https://s3.amazonaws.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-vm/{uuid.uuid1()}", - "region": "us-east-1", - }, - GCP: { - "endpoint": "https://storage.googleapis.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-vm/{uuid.uuid1()}", - "region": "", - }, - } - credentials = { - AWS: { - "access-key": os.environ["AWS_ACCESS_KEY"], - "secret-key": os.environ["AWS_SECRET_KEY"], - }, - GCP: { - "access-key": os.environ["GCP_ACCESS_KEY"], - "secret-key": os.environ["GCP_SECRET_KEY"], - }, - } - yield configs, credentials - # Delete the previously created objects. - logger.info("deleting the previously created backups") - for cloud, config in configs.items(): - session = boto3.session.Session( - aws_access_key_id=credentials[cloud]["access-key"], - aws_secret_access_key=credentials[cloud]["secret-key"], - region_name=config["region"], - ) - s3 = session.resource( - "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) - ) - bucket = s3.Bucket(config["bucket"]) - # GCS doesn't support batch delete operation, so delete the objects one by one. 
- for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): - bucket_object.delete() - @pytest.mark.abort_on_fail -async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None: +async def test_backup_gcp(ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict], charm) -> None: """Build and deploy two units of PostgreSQL in GCP, test backup and restore actions.""" - config = cloud_configs[0][GCP] - credentials = cloud_configs[1][GCP] + config = gcp_cloud_configs[0] + credentials = gcp_cloud_configs[1] await backup_operations( ops_test, @@ -114,7 +64,9 @@ async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], c await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True) -async def test_restore_on_new_cluster(ops_test: OpsTest, charm) -> None: +async def test_restore_on_new_cluster( + ops_test: OpsTest, charm, gcp_cloud_configs: tuple[dict, dict] +) -> None: """Test that is possible to restore a backup to another PostgreSQL cluster.""" previous_database_app_name = f"{DATABASE_APP_NAME}-gcp" database_app_name = f"new-{DATABASE_APP_NAME}" @@ -212,7 +164,7 @@ async def test_restore_on_new_cluster(ops_test: OpsTest, charm) -> None: async def test_invalid_config_and_recovery_after_fixing_it( - ops_test: OpsTest, cloud_configs: tuple[dict, dict] + ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict] ) -> None: """Test that the charm can handle invalid and valid backup configurations.""" database_app_name = f"new-{DATABASE_APP_NAME}" @@ -243,10 +195,10 @@ async def test_invalid_config_and_recovery_after_fixing_it( logger.info( "configuring S3 integrator for a valid cloud, but with the path of another cluster repository" ) - await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(cloud_configs[0][GCP]) + await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(gcp_cloud_configs[0]) action = await 
ops_test.model.units.get(f"{S3_INTEGRATOR_APP_NAME}/0").run_action( "sync-s3-credentials", - **cloud_configs[1][GCP], + **gcp_cloud_configs[1], ) await action.wait() logger.info("waiting for the database charm to become blocked") @@ -257,7 +209,7 @@ async def test_invalid_config_and_recovery_after_fixing_it( # Provide valid backup configurations, with another path in the S3 bucket. logger.info("configuring S3 integrator for a valid cloud") - config = cloud_configs[0][GCP].copy() + config = gcp_cloud_configs[0].copy() config["path"] = f"/postgresql/{uuid.uuid1()}" await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(config) logger.info("waiting for the database charm to become active") diff --git a/tests/integration/test_backups_pitr_aws.py b/tests/integration/test_backups_pitr_aws.py index 70da90c104..6220691ec9 100644 --- a/tests/integration/test_backups_pitr_aws.py +++ b/tests/integration/test_backups_pitr_aws.py @@ -2,19 +2,16 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. import logging -import os -import uuid -import boto3 import pytest as pytest from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential from . import architecture +from .conftest import AWS from .helpers import ( CHARM_BASE, DATABASE_APP_NAME, - construct_endpoint, db_connect, get_password, get_primary, @@ -35,54 +32,6 @@ logger = logging.getLogger(__name__) -AWS = "AWS" -GCP = "GCP" - - -@pytest.fixture(scope="module") -async def cloud_configs() -> None: - # Define some configurations and credentials. 
-    configs = {
-        AWS: {
-            "endpoint": "https://s3.amazonaws.com",
-            "bucket": "data-charms-testing",
-            "path": f"/postgresql-vm/{uuid.uuid1()}",
-            "region": "us-east-1",
-        },
-        GCP: {
-            "endpoint": "https://storage.googleapis.com",
-            "bucket": "data-charms-testing",
-            "path": f"/postgresql-vm/{uuid.uuid1()}",
-            "region": "",
-        },
-    }
-    credentials = {
-        AWS: {
-            "access-key": os.environ["AWS_ACCESS_KEY"],
-            "secret-key": os.environ["AWS_SECRET_KEY"],
-        },
-        GCP: {
-            "access-key": os.environ["GCP_ACCESS_KEY"],
-            "secret-key": os.environ["GCP_SECRET_KEY"],
-        },
-    }
-    yield configs, credentials
-    # Delete the previously created objects.
-    logger.info("deleting the previously created backups")
-    for cloud, config in configs.items():
-        session = boto3.session.Session(
-            aws_access_key_id=credentials[cloud]["access-key"],
-            aws_secret_access_key=credentials[cloud]["secret-key"],
-            region_name=config["region"],
-        )
-        s3 = session.resource(
-            "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
-        )
-        bucket = s3.Bucket(config["bucket"])
-        # GCS doesn't support batch delete operation, so delete the objects one by one.
-        for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
-            bucket_object.delete()
-
 
 async def pitr_backup_operations(
     ops_test: OpsTest,
@@ -376,10 +325,11 @@ async def pitr_backup_operations(
 
 @pytest.mark.abort_on_fail
-async def test_pitr_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
+async def test_pitr_backup_aws(
+    ops_test: OpsTest, aws_cloud_configs: tuple[dict, dict], charm
+) -> None:
     """Build, deploy two units of PostgreSQL and do backup in AWS. 
Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action."""
-    config = cloud_configs[0][AWS]
-    credentials = cloud_configs[1][AWS]
+    config, credentials = aws_cloud_configs
 
     await pitr_backup_operations(
         ops_test,
diff --git a/tests/integration/test_backups_pitr_gcp.py b/tests/integration/test_backups_pitr_gcp.py
index e85ac25610..4194bdd068 100644
--- a/tests/integration/test_backups_pitr_gcp.py
+++ b/tests/integration/test_backups_pitr_gcp.py
@@ -2,19 +2,16 @@
 # Copyright 2024 Canonical Ltd.
 # See LICENSE file for licensing details.
 import logging
-import os
-import uuid
 
-import boto3
 import pytest as pytest
 from pytest_operator.plugin import OpsTest
 from tenacity import Retrying, stop_after_attempt, wait_exponential
 
 from . import architecture
+from .conftest import GCP
 from .helpers import (
     CHARM_BASE,
     DATABASE_APP_NAME,
-    construct_endpoint,
     db_connect,
     get_password,
     get_primary,
@@ -35,54 +32,6 @@
 
 logger = logging.getLogger(__name__)
 
-AWS = "AWS"
-GCP = "GCP"
-
-
-@pytest.fixture(scope="module")
-async def cloud_configs() -> None:
-    # Define some configurations and credentials.
-    configs = {
-        AWS: {
-            "endpoint": "https://s3.amazonaws.com",
-            "bucket": "data-charms-testing",
-            "path": f"/postgresql-vm/{uuid.uuid1()}",
-            "region": "us-east-1",
-        },
-        GCP: {
-            "endpoint": "https://storage.googleapis.com",
-            "bucket": "data-charms-testing",
-            "path": f"/postgresql-vm/{uuid.uuid1()}",
-            "region": "",
-        },
-    }
-    credentials = {
-        AWS: {
-            "access-key": os.environ["AWS_ACCESS_KEY"],
-            "secret-key": os.environ["AWS_SECRET_KEY"],
-        },
-        GCP: {
-            "access-key": os.environ["GCP_ACCESS_KEY"],
-            "secret-key": os.environ["GCP_SECRET_KEY"],
-        },
-    }
-    yield configs, credentials
-    # Delete the previously created objects. 
- logger.info("deleting the previously created backups") - for cloud, config in configs.items(): - session = boto3.session.Session( - aws_access_key_id=credentials[cloud]["access-key"], - aws_secret_access_key=credentials[cloud]["secret-key"], - region_name=config["region"], - ) - s3 = session.resource( - "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) - ) - bucket = s3.Bucket(config["bucket"]) - # GCS doesn't support batch delete operation, so delete the objects one by one. - for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): - bucket_object.delete() - async def pitr_backup_operations( ops_test: OpsTest, @@ -376,10 +325,11 @@ async def pitr_backup_operations( @pytest.mark.abort_on_fail -async def test_pitr_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None: +async def test_pitr_backup_gcp( + ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict], charm +) -> None: """Build, deploy two units of PostgreSQL and do backup in GCP. 
Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action.""" - config = cloud_configs[0][GCP] - credentials = cloud_configs[1][GCP] + config, credentials = gcp_cloud_configs await pitr_backup_operations( ops_test, From a48ae0d216ed3d1827b494def111386ce8c169a9 Mon Sep 17 00:00:00 2001 From: Dragomir Penev Date: Mon, 10 Mar 2025 11:10:43 +0200 Subject: [PATCH 3/4] Skip subordinate tests --- tests/integration/test_subordinates.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_subordinates.py b/tests/integration/test_subordinates.py index c03288ae36..534e3d764d 100644 --- a/tests/integration/test_subordinates.py +++ b/tests/integration/test_subordinates.py @@ -21,8 +21,18 @@ logger = logging.getLogger(__name__) +@pytest.fixture(scope="module") +async def check_subordinate_env_vars(ops_test: OpsTest) -> None: + if ( + not os.environ.get("UBUNTU_PRO_TOKEN", "").strip() + or not os.environ.get("LANDSCAPE_ACCOUNT_NAME", "").strip() + or not os.environ.get("LANDSCAPE_REGISTRATION_KEY", "").strip() + ): + pytest.skip("Subordiane configs not set") + + @pytest.mark.abort_on_fail -async def test_deploy(ops_test: OpsTest, charm: str): +async def test_deploy(ops_test: OpsTest, charm: str, check_subordinate_env_vars): await gather( ops_test.model.deploy( charm, @@ -60,7 +70,7 @@ async def test_deploy(ops_test: OpsTest, charm: str): ) -async def test_scale_up(ops_test: OpsTest): +async def test_scale_up(ops_test: OpsTest, check_subordinate_env_vars): await scale_application(ops_test, DATABASE_APP_NAME, 4) await ops_test.model.wait_for_idle( @@ -68,7 +78,7 @@ async def test_scale_up(ops_test: OpsTest): ) -async def test_scale_down(ops_test: OpsTest): +async def test_scale_down(ops_test: OpsTest, check_subordinate_env_vars): await scale_application(ops_test, DATABASE_APP_NAME, 3) await ops_test.model.wait_for_idle( From f6ffdb3980b7205a34b7651543470c3a7ba148e9 Mon Sep 17 00:00:00 2001 
From: Dragomir Penev <6687393+dragomirp@users.noreply.github.com> Date: Mon, 10 Mar 2025 11:52:16 +0200 Subject: [PATCH 4/4] Update tests/integration/test_subordinates.py Co-authored-by: Carl Csaposs --- tests/integration/test_subordinates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_subordinates.py b/tests/integration/test_subordinates.py index 534e3d764d..585bd765db 100644 --- a/tests/integration/test_subordinates.py +++ b/tests/integration/test_subordinates.py @@ -28,7 +28,7 @@ async def check_subordinate_env_vars(ops_test: OpsTest) -> None: or not os.environ.get("LANDSCAPE_ACCOUNT_NAME", "").strip() or not os.environ.get("LANDSCAPE_REGISTRATION_KEY", "").strip() ): - pytest.skip("Subordiane configs not set") + pytest.skip("Subordinate configs not set") @pytest.mark.abort_on_fail