Merged
9 changes: 8 additions & 1 deletion lib/charms/grafana_agent/v0/cos_agent.py
@@ -254,7 +254,7 @@ class _MetricsEndpointDict(TypedDict):

LIBID = "dc15fa84cef84ce58155fb84f6c6213a"
LIBAPI = 0
LIBPATCH = 19
LIBPATCH = 20

PYDEPS = ["cosl >= 0.0.50", "pydantic"]

@@ -758,6 +758,13 @@ def _dashboards(self) -> List[str]:
# because there is currently no other way to communicate the dashboard path separately.
# https://github.com/canonical/grafana-k8s-operator/pull/363
dashboard["uid"] = DashboardPath40UID.generate(self._charm.meta.name, rel_path)

# Add tags
tags: List[str] = dashboard.get("tags", [])
if not any(tag.startswith("charm: ") for tag in tags):
tags.append(f"charm: {self._charm.meta.name}")
dashboard["tags"] = tags

dashboards.append(LZMABase64.compress(json.dumps(dashboard)))
return dashboards

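Note on the cos_agent change: the new block in `_dashboards` tags each forwarded dashboard with `charm: <charm name>` unless such a tag already exists, making dashboards filterable by originating charm. A minimal standalone sketch of that idempotent logic (the `add_charm_tag` helper and the sample dashboard below are illustrative only, not part of the library):

```python
import json
from typing import List


def add_charm_tag(dashboard: dict, charm_name: str) -> dict:
    """Append a 'charm: <name>' tag unless one is already present."""
    tags: List[str] = dashboard.get("tags", [])
    if not any(tag.startswith("charm: ") for tag in tags):
        tags.append(f"charm: {charm_name}")
    dashboard["tags"] = tags
    return dashboard


dashboard = json.loads('{"title": "Example", "tags": ["postgresql"]}')
print(add_charm_tag(dashboard, "postgresql")["tags"])
# -> ['postgresql', 'charm: postgresql']; calling it again leaves the tags unchanged.
```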
8 changes: 4 additions & 4 deletions lib/charms/rolling_ops/v0/rollingops.py
@@ -63,13 +63,14 @@ def _on_trigger_restart(self, event):
juju run-action some-charm/0 some-charm/1 <... some-charm/n> restart
```

Note that all units that plan to restart must receive the action and emit the aquire
Note that all units that plan to restart must receive the action and emit the acquire
event. Any units that do not run their acquire handler will be left out of the rolling
restart. (An operator might take advantage of this fact to recover from a failed rolling
operation without restarting workloads that were able to successfully restart -- simply
omit the successful units from a subsequent run-action call.)

"""

import logging
from enum import Enum
from typing import AnyStr, Callable, Optional
@@ -88,7 +89,7 @@ def _on_trigger_restart(self, event):

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 7
LIBPATCH = 8


class LockNoRelationError(Exception):
@@ -149,7 +150,6 @@ class Lock:
"""

def __init__(self, manager, unit=None):

self.relation = manager.model.relations[manager.name][0]
if not self.relation:
# TODO: defer caller in this case (probably just fired too soon).
@@ -246,7 +246,7 @@ def __init__(self, manager):

# Gather all the units.
relation = manager.model.relations[manager.name][0]
units = [unit for unit in relation.units]
units = list(relation.units)

# Plus our unit ...
units.append(manager.model.unit)
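Note on the rolling_ops change: the corrected docstring sentence is about the requirement that every unit receiving the restart action emit the acquire event. A rough sketch of that wiring follows, assuming the `RollingOpsManager` constructor and `acquire_lock` event from the library's own usage example (treat the exact names as unverified here):

```python
from ops.charm import CharmBase

from charms.rolling_ops.v0.rollingops import RollingOpsManager


class SomeCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # Coordinates the lock over a peer relation named "restart".
        self.restart_manager = RollingOpsManager(
            charm=self, relation="restart", callback=self._restart
        )
        # Assumes a "restart" action declared in the charm's actions metadata.
        self.framework.observe(self.on.restart_action, self._on_trigger_restart)

    def _restart(self, event):
        # Workload-specific restart logic; runs only while this unit holds the lock.
        ...

    def _on_trigger_restart(self, event):
        # Units that never emit this event are simply left out of the rolling restart.
        self.on[self.restart_manager.name].acquire_lock.emit()
```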
7 changes: 5 additions & 2 deletions lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py
@@ -325,7 +325,10 @@ def _remove_stale_otel_sdk_packages():
SpanExportResult,
)
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from opentelemetry.trace import INVALID_SPAN, Tracer
from opentelemetry.trace import (
INVALID_SPAN,
Tracer,
)
from opentelemetry.trace import get_current_span as otlp_get_current_span
from opentelemetry.trace import (
get_tracer,
@@ -345,7 +348,7 @@ def _remove_stale_otel_sdk_packages():
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version

LIBPATCH = 6
LIBPATCH = 7

PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"]

83 changes: 83 additions & 0 deletions tests/integration/conftest.py
@@ -1,8 +1,20 @@
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import os
import uuid

import boto3
import pytest
from pytest_operator.plugin import OpsTest

from . import architecture
from .helpers import construct_endpoint

AWS = "AWS"
GCP = "GCP"

logger = logging.getLogger(__name__)


@pytest.fixture(scope="session")
@@ -11,3 +23,74 @@ def charm():
# juju bundle files expect local charms to begin with `./` or `/` to distinguish them from
# Charmhub charms.
return f"./postgresql_ubuntu@22.04-{architecture.architecture}.charm"


def get_cloud_config(cloud: str) -> tuple[dict[str, str], dict[str, str]]:
# Define some configurations and credentials.
if cloud == AWS:
return {
"endpoint": "https://s3.amazonaws.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-k8s/{uuid.uuid1()}",
"region": "us-east-1",
}, {
"access-key": os.environ["AWS_ACCESS_KEY"],
"secret-key": os.environ["AWS_SECRET_KEY"],
}
elif cloud == GCP:
return {
"endpoint": "https://storage.googleapis.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-k8s/{uuid.uuid1()}",
"region": "",
}, {
"access-key": os.environ["GCP_ACCESS_KEY"],
"secret-key": os.environ["GCP_SECRET_KEY"],
}


def cleanup_cloud(config: dict[str, str], credentials: dict[str, str]) -> None:
# Delete the previously created objects.
logger.info("deleting the previously created backups")
session = boto3.session.Session(
aws_access_key_id=credentials["access-key"],
aws_secret_access_key=credentials["secret-key"],
region_name=config["region"],
)
s3 = session.resource(
"s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
)
bucket = s3.Bucket(config["bucket"])
# GCS doesn't support batch delete operation, so delete the objects one by one.
for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
bucket_object.delete()


@pytest.fixture(scope="module")
async def aws_cloud_configs(ops_test: OpsTest) -> None:
if (
not os.environ.get("AWS_ACCESS_KEY", "").strip()
or not os.environ.get("AWS_SECRET_KEY", "").strip()
):
pytest.skip("AWS configs not set")
return

config, credentials = get_cloud_config(AWS)
yield config, credentials

cleanup_cloud(config, credentials)


@pytest.fixture(scope="module")
async def gcp_cloud_configs(ops_test: OpsTest) -> None:
if (
not os.environ.get("GCP_ACCESS_KEY", "").strip()
or not os.environ.get("GCP_SECRET_KEY", "").strip()
):
pytest.skip("GCP configs not set")
return

config, credentials = get_cloud_config(GCP)
yield config, credentials

cleanup_cloud(config, credentials)
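Note on the new fixtures: `aws_cloud_configs` and `gcp_cloud_configs` are module-scoped, skip the module when the corresponding environment variables are unset, yield a `(config, credentials)` tuple, and call `cleanup_cloud` once the module's tests finish. A hypothetical consuming test (the test name and assertions below are illustrative only, not part of this PR):

```python
import pytest
from pytest_operator.plugin import OpsTest


@pytest.mark.abort_on_fail
async def test_uses_aws_fixture(ops_test: OpsTest, aws_cloud_configs: tuple[dict, dict]) -> None:
    # The fixture already skipped this module if AWS_ACCESS_KEY/AWS_SECRET_KEY are unset.
    config, credentials = aws_cloud_configs
    assert config["bucket"] == "data-charms-testing"
    assert set(credentials) == {"access-key", "secret-key"}
```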
59 changes: 4 additions & 55 deletions tests/integration/test_backups_aws.py
@@ -2,19 +2,16 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import os
import uuid

import boto3
import pytest as pytest
from pytest_operator.plugin import OpsTest
from tenacity import Retrying, stop_after_attempt, wait_exponential

from . import architecture
from .conftest import AWS
from .helpers import (
DATABASE_APP_NAME,
backup_operations,
construct_endpoint,
db_connect,
get_password,
get_primary,
@@ -40,60 +37,12 @@

logger = logging.getLogger(__name__)

AWS = "AWS"
GCP = "GCP"


@pytest.fixture(scope="module")
async def cloud_configs() -> None:
# Define some configurations and credentials.
configs = {
AWS: {
"endpoint": "https://s3.amazonaws.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-vm/{uuid.uuid1()}",
"region": "us-east-1",
},
GCP: {
"endpoint": "https://storage.googleapis.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-vm/{uuid.uuid1()}",
"region": "",
},
}
credentials = {
AWS: {
"access-key": os.environ["AWS_ACCESS_KEY"],
"secret-key": os.environ["AWS_SECRET_KEY"],
},
GCP: {
"access-key": os.environ["GCP_ACCESS_KEY"],
"secret-key": os.environ["GCP_SECRET_KEY"],
},
}
yield configs, credentials
# Delete the previously created objects.
logger.info("deleting the previously created backups")
for cloud, config in configs.items():
session = boto3.session.Session(
aws_access_key_id=credentials[cloud]["access-key"],
aws_secret_access_key=credentials[cloud]["secret-key"],
region_name=config["region"],
)
s3 = session.resource(
"s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
)
bucket = s3.Bucket(config["bucket"])
# GCS doesn't support batch delete operation, so delete the objects one by one.
for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
bucket_object.delete()


@pytest.mark.abort_on_fail
async def test_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
async def test_backup_aws(ops_test: OpsTest, aws_cloud_configs: tuple[dict, dict], charm) -> None:
"""Build and deploy two units of PostgreSQL in AWS, test backup and restore actions."""
config = cloud_configs[0][AWS]
credentials = cloud_configs[1][AWS]
config = aws_cloud_configs[0]
credentials = aws_cloud_configs[1]

await backup_operations(
ops_test,
70 changes: 11 additions & 59 deletions tests/integration/test_backups_gcp.py
@@ -2,20 +2,18 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
import os
import uuid

import boto3
import pytest as pytest
from pytest_operator.plugin import OpsTest
from tenacity import Retrying, stop_after_attempt, wait_exponential

from . import architecture
from .conftest import GCP
from .helpers import (
CHARM_BASE,
DATABASE_APP_NAME,
backup_operations,
construct_endpoint,
db_connect,
get_password,
get_unit_address,
@@ -39,60 +37,12 @@

logger = logging.getLogger(__name__)

AWS = "AWS"
GCP = "GCP"


@pytest.fixture(scope="module")
async def cloud_configs() -> None:
# Define some configurations and credentials.
configs = {
AWS: {
"endpoint": "https://s3.amazonaws.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-vm/{uuid.uuid1()}",
"region": "us-east-1",
},
GCP: {
"endpoint": "https://storage.googleapis.com",
"bucket": "data-charms-testing",
"path": f"/postgresql-vm/{uuid.uuid1()}",
"region": "",
},
}
credentials = {
AWS: {
"access-key": os.environ["AWS_ACCESS_KEY"],
"secret-key": os.environ["AWS_SECRET_KEY"],
},
GCP: {
"access-key": os.environ["GCP_ACCESS_KEY"],
"secret-key": os.environ["GCP_SECRET_KEY"],
},
}
yield configs, credentials
# Delete the previously created objects.
logger.info("deleting the previously created backups")
for cloud, config in configs.items():
session = boto3.session.Session(
aws_access_key_id=credentials[cloud]["access-key"],
aws_secret_access_key=credentials[cloud]["secret-key"],
region_name=config["region"],
)
s3 = session.resource(
"s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
)
bucket = s3.Bucket(config["bucket"])
# GCS doesn't support batch delete operation, so delete the objects one by one.
for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
bucket_object.delete()


@pytest.mark.abort_on_fail
async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
async def test_backup_gcp(ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict], charm) -> None:
"""Build and deploy two units of PostgreSQL in GCP, test backup and restore actions."""
config = cloud_configs[0][GCP]
credentials = cloud_configs[1][GCP]
config = gcp_cloud_configs[0]
credentials = gcp_cloud_configs[1]

await backup_operations(
ops_test,
@@ -114,7 +64,9 @@ async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], c
await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)


async def test_restore_on_new_cluster(ops_test: OpsTest, charm) -> None:
async def test_restore_on_new_cluster(
ops_test: OpsTest, charm, gcp_cloud_configs: tuple[dict, dict]
) -> None:
"""Test that is possible to restore a backup to another PostgreSQL cluster."""
previous_database_app_name = f"{DATABASE_APP_NAME}-gcp"
database_app_name = f"new-{DATABASE_APP_NAME}"
@@ -212,7 +164,7 @@ async def test_restore_on_new_cluster(ops_test: OpsTest, charm) -> None:


async def test_invalid_config_and_recovery_after_fixing_it(
ops_test: OpsTest, cloud_configs: tuple[dict, dict]
ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict]
) -> None:
"""Test that the charm can handle invalid and valid backup configurations."""
database_app_name = f"new-{DATABASE_APP_NAME}"
@@ -243,10 +195,10 @@ async def test_invalid_config_and_recovery_after_fixing_it(
logger.info(
"configuring S3 integrator for a valid cloud, but with the path of another cluster repository"
)
await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(cloud_configs[0][GCP])
await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(gcp_cloud_configs[0])
action = await ops_test.model.units.get(f"{S3_INTEGRATOR_APP_NAME}/0").run_action(
"sync-s3-credentials",
**cloud_configs[1][GCP],
**gcp_cloud_configs[1],
)
await action.wait()
logger.info("waiting for the database charm to become blocked")
Expand All @@ -257,7 +209,7 @@ async def test_invalid_config_and_recovery_after_fixing_it(

# Provide valid backup configurations, with another path in the S3 bucket.
logger.info("configuring S3 integrator for a valid cloud")
config = cloud_configs[0][GCP].copy()
config = gcp_cloud_configs[0].copy()
config["path"] = f"/postgresql/{uuid.uuid1()}"
await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(config)
logger.info("waiting for the database charm to become active")