From 1a68536d77dc47820deef7f2648c9f4678f68a08 Mon Sep 17 00:00:00 2001 From: Leon <82407168+sed-i@users.noreply.github.com> Date: Wed, 14 Feb 2024 13:47:12 -0500 Subject: [PATCH] Handle restart and status better (#349) * Use prev restart value * Use 'set_ports' instead of k8s service patch * Use collect-status * Drop the deprecated SIMULATE_CAN_CONNECT --- .../v1/kubernetes_service_patch.py | 341 ------------------ src/charm.py | 151 +++++--- tests/unit/test_charm.py | 36 +- tests/unit/test_log_proxy_consumer.py | 2 - 4 files changed, 113 insertions(+), 417 deletions(-) delete mode 100644 lib/charms/observability_libs/v1/kubernetes_service_patch.py diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py deleted file mode 100644 index 2cce729ee..000000000 --- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -"""# KubernetesServicePatch Library. - -This library is designed to enable developers to more simply patch the Kubernetes Service created -by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a -service named after the application in the namespace (named after the Juju model). This service by -default contains a "placeholder" port, which is 65536/TCP. - -When modifying the default set of resources managed by Juju, one must consider the lifecycle of the -charm. In this case, any modifications to the default service (created during deployment), will be -overwritten during a charm upgrade. - -When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` -events which applies the patch to the cluster. This should ensure that the service ports are -correct throughout the charm's life. - -The constructor simply takes a reference to the parent charm, and a list of -[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the -service. For information regarding the `lightkube` `ServicePort` model, please visit the -`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport). - -Optionally, a name of the service (in case service name needs to be patched as well), labels, -selectors, and annotations can be provided as keyword arguments. - -## Getting Started - -To get started using the library, you just need to fetch the library using `charmcraft`. **Note -that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** - -```shell -cd some-charm -charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch -cat << EOF >> requirements.txt -lightkube -lightkube-models -EOF -``` - -Then, to initialise the library: - -For `ClusterIP` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(443, name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch(self, [port]) - # ... -``` - -For `LoadBalancer`/`NodePort` services: - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... 
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666) - self.service_patcher = KubernetesServicePatch( - self, [port], "LoadBalancer" - ) - # ... -``` - -Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"` - -```python -# ... -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP") - udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP") - sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP") - self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp]) - # ... -``` - -Bound with custom events by providing `refresh_event` argument: -For example, you would like to have a configurable port in your charm and want to apply -service patch every time charm config is changed. - -```python -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch -from lightkube.models.core_v1 import ServicePort - -class SomeCharm(CharmBase): - def __init__(self, *args): - # ... - port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") - self.service_patcher = KubernetesServicePatch( - self, - [port], - refresh_event=self.on.config_changed - ) - # ... -``` - -Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library -does not try to make any API calls, or open any files during testing that are unlikely to be -present, and could break your tests. The easiest way to do this is during your test `setUp`: - -```python -# ... - -@patch("charm.KubernetesServicePatch", lambda x, y: None) -def setUp(self, *unused): - self.harness = Harness(SomeCharm) - # ... -``` -""" - -import logging -from types import MethodType -from typing import List, Literal, Optional, Union - -from lightkube import ApiError, Client # pyright: ignore -from lightkube.core import exceptions -from lightkube.models.core_v1 import ServicePort, ServiceSpec -from lightkube.models.meta_v1 import ObjectMeta -from lightkube.resources.core_v1 import Service -from lightkube.types import PatchType -from ops.charm import CharmBase -from ops.framework import BoundEvent, Object - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "0042f86d0a874435adef581806cddbbb" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 9 - -ServiceType = Literal["ClusterIP", "LoadBalancer"] - - -class KubernetesServicePatch(Object): - """A utility for patching the Kubernetes service set up by Juju.""" - - def __init__( - self, - charm: CharmBase, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - *, - refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - ): - """Constructor for KubernetesServicePatch. - - Args: - charm: the charm that is instantiating the library. - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. 
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - refresh_event: an optional bound event or list of bound events which - will be observed to re-apply the patch (e.g. on port change). - The `install` and `upgrade-charm` events would be observed regardless. - """ - super().__init__(charm, "kubernetes-service-patch") - self.charm = charm - self.service_name = service_name if service_name else self._app - self.service = self._service_object( - ports, - service_name, - service_type, - additional_labels, - additional_selectors, - additional_annotations, - ) - - # Make mypy type checking happy that self._patch is a method - assert isinstance(self._patch, MethodType) - # Ensure this patch is applied during the 'install' and 'upgrade-charm' events - self.framework.observe(charm.on.install, self._patch) - self.framework.observe(charm.on.upgrade_charm, self._patch) - self.framework.observe(charm.on.update_status, self._patch) - - # apply user defined events - if refresh_event: - if not isinstance(refresh_event, list): - refresh_event = [refresh_event] - - for evt in refresh_event: - self.framework.observe(evt, self._patch) - - def _service_object( - self, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - ) -> Service: - """Creates a valid Service representation. - - Args: - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - - Returns: - Service: A valid representation of a Kubernetes Service with the correct ports. - """ - if not service_name: - service_name = self._app - labels = {"app.kubernetes.io/name": self._app} - if additional_labels: - labels.update(additional_labels) - selector = {"app.kubernetes.io/name": self._app} - if additional_selectors: - selector.update(additional_selectors) - return Service( - apiVersion="v1", - kind="Service", - metadata=ObjectMeta( - namespace=self._namespace, - name=service_name, - labels=labels, - annotations=additional_annotations, # type: ignore[arg-type] - ), - spec=ServiceSpec( - selector=selector, - ports=ports, - type=service_type, - ), - ) - - def _patch(self, _) -> None: - """Patch the Kubernetes service created by Juju to map the correct port. - - Raises: - PatchFailed: if patching fails due to lack of permissions, or otherwise. 
- """ - try: - client = Client() # pyright: ignore - except exceptions.ConfigError as e: - logger.warning("Error creating k8s client: %s", e) - return - - try: - if self._is_patched(client): - return - if self.service_name != self._app: - self._delete_and_create_service(client) - client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) - except ApiError as e: - if e.status.code == 403: - logger.error("Kubernetes service patch failed: `juju trust` this application.") - else: - logger.error("Kubernetes service patch failed: %s", str(e)) - else: - logger.info("Kubernetes service '%s' patched successfully", self._app) - - def _delete_and_create_service(self, client: Client): - service = client.get(Service, self._app, namespace=self._namespace) - service.metadata.name = self.service_name # type: ignore[attr-defined] - service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 - client.delete(Service, self._app, namespace=self._namespace) - client.create(service) - - def is_patched(self) -> bool: - """Reports if the service patch has been applied. - - Returns: - bool: A boolean indicating if the service patch has been applied. - """ - client = Client() # pyright: ignore - return self._is_patched(client) - - def _is_patched(self, client: Client) -> bool: - # Get the relevant service from the cluster - try: - service = client.get(Service, name=self.service_name, namespace=self._namespace) - except ApiError as e: - if e.status.code == 404 and self.service_name != self._app: - return False - logger.error("Kubernetes service get failed: %s", str(e)) - raise - - # Construct a list of expected ports, should the patch be applied - expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] # type: ignore[attr-defined] - # Construct a list in the same manner, using the fetched service - fetched_ports = [ - (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] - ] # noqa: E501 - return expected_ports == fetched_ports - - @property - def _app(self) -> str: - """Name of the current Juju application. - - Returns: - str: A string containing the name of the current Juju application. - """ - return self.charm.app.name - - @property - def _namespace(self) -> str: - """The Kubernetes namespace we're running in. - - Returns: - str: A string containing the name of the current Kubernetes namespace. 
- """ - with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: - return f.read().strip() diff --git a/src/charm.py b/src/charm.py index c2ad752fc..f0fda8d83 100755 --- a/src/charm.py +++ b/src/charm.py @@ -19,7 +19,7 @@ import time import urllib.request from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple, TypedDict, cast from urllib.error import HTTPError, URLError from urllib.parse import urlparse @@ -39,10 +39,6 @@ ResourceRequirements, adjust_resource_requirements, ) -from charms.observability_libs.v1.kubernetes_service_patch import ( - KubernetesServicePatch, - ServicePort, -) from charms.prometheus_k8s.v0.prometheus_scrape import MetricsEndpointProvider from charms.tempo_k8s.v1.charm_tracing import trace_charm from charms.tempo_k8s.v1.tracing import TracingEndpointRequirer @@ -55,14 +51,43 @@ RULES_DIR, ConfigBuilder, ) +from ops import CollectStatusEvent, StoredState from ops.charm import CharmBase from ops.main import main -from ops.model import ActiveStatus, BlockedStatus, WaitingStatus -from ops.pebble import ChangeError, Error, Layer, PathError, ProtocolError +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + Port, + StatusBase, + WaitingStatus, +) +from ops.pebble import Error, Layer, PathError, ProtocolError logger = logging.getLogger(__name__) +class CompositeStatus(TypedDict): + """Per-component status holder.""" + + # These are going to go into stored state, so we must use marshallable objects. + # They are passed to StatusBase.from_name(). + k8s_patch: Tuple[str, str] + config: Tuple[str, str] + rules: Tuple[str, str] + + +def to_tuple(status: StatusBase) -> Tuple[str, str]: + """Convert a StatusBase to tuple, so it is marshallable into StoredState.""" + return status.name, status.message + + +def to_status(tpl: Tuple[str, str]) -> StatusBase: + """Convert a tuple to a StatusBase, so it could be used natively with ops.""" + name, message = tpl + return StatusBase.from_name(name, message) + + @trace_charm( tracing_endpoint="tracing_endpoint", server_cert="server_cert_path", @@ -78,12 +103,24 @@ class LokiOperatorCharm(CharmBase): """Charm the service.""" + _stored = StoredState() _port = HTTP_LISTEN_PORT _name = "loki" _ca_cert_path = "/usr/local/share/ca-certificates/cos-ca.crt" def __init__(self, *args): super().__init__(*args) + + # We need stored state for push statuses. 
+ # https://discourse.charmhub.io/t/its-probably-ok-for-a-unit-to-go-into-error-state/13022 + self._stored.set_default( + status=CompositeStatus( + k8s_patch=to_tuple(ActiveStatus()), + config=to_tuple(ActiveStatus()), + rules=to_tuple(ActiveStatus()), + ) + ) + self._container = self.unit.get_container(self._name) # If Loki is run in single-tenant mode, all the chunks are put in a folder named "fake" @@ -92,9 +129,7 @@ def __init__(self, *args): tenant_id = "fake" self.rules_dir_tenant = os.path.join(RULES_DIR, tenant_id) - self.service_patch = KubernetesServicePatch( - self, [ServicePort(self._port, name=self.app.name)] - ) + self.unit.set_ports(Port("tcp", self._port)) self.resources_patch = KubernetesComputeResourcesPatch( self, @@ -171,10 +206,20 @@ def __init__(self, *args): self._loki_push_api_alert_rules_changed, ) self.framework.observe(self.on.logging_relation_changed, self._on_logging_relation_changed) + self.framework.observe(self.on.collect_unit_status, self._on_collect_unit_status) ############################################## # CHARM HOOKS HANDLERS # ############################################## + + def _on_collect_unit_status(self, event: CollectStatusEvent): + # "Pull" statuses + # TODO refactor _configure to turn the "rules" status into a "pull" status. + + # "Push" statuses + for status in self._stored.status.values(): + event.add_status(to_status(status)) + def _on_config_changed(self, _): self._configure() @@ -317,20 +362,25 @@ def _tls_ready(self) -> bool: ############################################## def _configure(self): # noqa: C901 """Configure Loki charm.""" - restart = False - - if not self.resources_patch.is_ready(): - if isinstance(self.unit.status, ActiveStatus) or self.unit.status.message == "": - self.unit.status = WaitingStatus("Waiting for resource limit patch to apply") - return + # "is_ready" is a racy check, so we do it once here (instead of in collect-status) + if self.resources_patch.is_ready(): + self._stored.status["k8s_patch"] = to_tuple(ActiveStatus()) + else: + if isinstance(to_status(self._stored.status["k8s_patch"]), ActiveStatus): + self._stored.status["k8s_patch"] = to_tuple( + WaitingStatus("Waiting for resource limit patch to apply") + ) - if not self._container.can_connect(): - self.unit.status = WaitingStatus("Waiting for Pebble ready") + # "can_connect" is a racy check, so we do it once here (instead of in collect-status) + if self._container.can_connect(): + self._stored.status["config"] = to_tuple(ActiveStatus()) + else: + self._stored.status["config"] = to_tuple(MaintenanceStatus("Configuring Loki")) return current_layer = self._container.get_plan() new_layer = self._build_pebble_layer - restart = True if current_layer.services != new_layer.services else False + restart = current_layer.services != new_layer.services config = ConfigBuilder( instance_addr=self.hostname, @@ -341,39 +391,23 @@ def _configure(self): # noqa: C901 http_tls=(self.server_cert.cert is not None), ).build() - try: - self._push_certs() - restart = self._update_config(config) - except (ProtocolError, PathError) as e: - self.unit.status = BlockedStatus(str(e)) - return - except Exception as e: - self.unit.status = BlockedStatus(str(e)) - return + # At this point we're already after the can_connect guard, so if the following pebble operations fail, better + # to let the charm go into error state than setting blocked. 
+ self._push_certs() + restart = self._update_config(config) or restart if restart: - try: - self._container.add_layer(self._name, new_layer, combine=True) - self._container.restart(self._name) - logger.info("Loki (re)started") - except ChangeError as e: - msg = f"Failed to restart loki: {e}" # or e.err? - self.unit.status = BlockedStatus(msg) - logger.error(msg) - return + self._container.add_layer(self._name, new_layer, combine=True) + self._container.restart(self._name) + logger.info("Loki (re)started") - # Don't clear alert error states on reconfigure - # but let errors connecting clear after a restart - if ( - isinstance(self.unit.status, BlockedStatus) - and "Errors in alert rule" in self.unit.status.message - ): + if isinstance(to_status(self._stored.status["rules"]), BlockedStatus): # Wait briefly for Loki to come back up and re-check the alert rules # in case an upgrade/refresh caused the check to occur when it wasn't - # ready yet + # ready yet. TODO: use custom pebble notice for "workload ready" event. time.sleep(2) self._check_alert_rules() - return + return # TODO: why do we have a return here? self.ingress_per_unit.provide_ingress_requirements( scheme="https" if self._tls_ready else "http", port=self._port @@ -383,8 +417,6 @@ def _configure(self): # noqa: C901 self.loki_provider.update_endpoint(url=self._external_url) self.catalogue.update_item(item=self._catalogue_item) - self.unit.status = ActiveStatus() - def _update_config(self, config: dict) -> bool: if self._running_config() != config: config_as_yaml = yaml.safe_dump(config) @@ -436,7 +468,7 @@ def _loki_push_api_alert_rules_changed(self, _: LokiPushApiAlertRulesChanged) -> self._regenerate_alert_rules() # Don't try to configure if checking the rules left us in BlockedStatus - if isinstance(self.unit.status, ActiveStatus): + if isinstance(to_status(self._stored.status["rules"]), ActiveStatus): self._configure() def _ensure_alert_rules_path(self) -> bool: @@ -512,27 +544,30 @@ def _check_alert_rules(self): if e.code == 404 and "no rule groups found" in msg: log_msg = "Checking alert rules: No rule groups found" logger.debug(log_msg) - self.unit.status = BlockedStatus(log_msg) + self._stored.status["rules"] = to_tuple(BlockedStatus(log_msg)) return message = "{} - {}".format(e.code, e.msg) # type: ignore logger.error("Checking alert rules: %s", message) - self.unit.status = BlockedStatus("Errors in alert rule groups. Check juju debug-log") + self._stored.status["rules"] = to_tuple( + BlockedStatus("Errors in alert rule groups. Check juju debug-log") + ) return except URLError as e: logger.error("Checking alert rules: %s", e.reason) - self.unit.status = BlockedStatus("Error connecting to Loki. Check juju debug-log") + self._stored.status["rules"] = to_tuple( + BlockedStatus("Error connecting to Loki. Check juju debug-log") + ) return except Exception as e: logger.error("Checking alert rules: %s", e) - self.unit.status = BlockedStatus("Error connecting to Loki. Check juju debug-log") + self._stored.status["rules"] = to_tuple( + BlockedStatus("Error connecting to Loki. 
Check juju debug-log") + ) return else: - log_msg = "Checking alert rules: Ok" - logger.debug(log_msg) - if isinstance(self.unit.status, BlockedStatus): - self.unit.status = ActiveStatus() - logger.info("Clearing blocked status with successful alert rule check") + logger.debug("Checking alert rules: Ok") + self._stored.status["rules"] = to_tuple(ActiveStatus()) return def _resource_reqs_from_config(self) -> ResourceRequirements: @@ -544,7 +579,7 @@ def _resource_reqs_from_config(self) -> ResourceRequirements: return adjust_resource_requirements(limits, requests, adhere_to_requests=True) def _on_k8s_patch_failed(self, event: K8sResourcePatchFailedEvent): - self.unit.status = BlockedStatus(str(event.message)) + self._stored.status["k8s_patch"] = to_tuple(BlockedStatus(cast(str, event.message))) @property def _loki_version(self) -> Optional[str]: diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index 50f2a5a2b..cc6b26fb8 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -9,16 +9,13 @@ from unittest.mock import Mock, PropertyMock, patch from urllib.error import HTTPError, URLError -import ops.testing import yaml from charm import LOKI_CONFIG as LOKI_CONFIG_PATH from charm import LokiOperatorCharm from helpers import FakeProcessVersionCheck, k8s_resource_multipatch -from ops.model import ActiveStatus, BlockedStatus, Container, WaitingStatus +from ops.model import ActiveStatus, BlockedStatus, Container, MaintenanceStatus from ops.testing import Harness -ops.testing.SIMULATE_CAN_CONNECT = True - METADATA = { "model": "consumer-model", "model_uuid": "20ce8299-3634-4bef-8bd8-5ace6c8816b4", @@ -107,7 +104,6 @@ class TestCharm(unittest.TestCase): - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Container, "exec", new=FakeProcessVersionCheck) @@ -153,7 +149,8 @@ def test__on_config_cannot_connect(self, mock_loki_config): # Since harness was not started with begin_with_initial_hooks(), this must # be emitted by hand to actually trigger _configure() self.harness.charm.on.config_changed.emit() - self.assertIsInstance(self.harness.charm.unit.status, WaitingStatus) + self.harness.evaluate_status() + self.assertIsInstance(self.harness.charm.unit.status, MaintenanceStatus) @patch("config_builder.ConfigBuilder.build") @k8s_resource_multipatch @@ -164,13 +161,13 @@ def test__on_config_can_connect(self, mock_loki_config): # Since harness was not started with begin_with_initial_hooks(), this must # be emitted by hand to actually trigger _configure() self.harness.charm.on.config_changed.emit() + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) class TestConfigFile(unittest.TestCase): """Feature: Loki config file in the workload container is rendered by the charm.""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) @patch("socket.getfqdn", new=lambda *args: "fqdn") @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @@ -262,7 +259,6 @@ class TestPebblePlan(unittest.TestCase): Background: TODO """ - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Container, "exec", new=FakeProcessVersionCheck) @@ -303,7 +299,6 @@ def test_loki_starts_when_cluster_deployed_without_any_relations(self, *_): class TestDelayedPebbleReady(unittest.TestCase): """Feature: Charm code must be resilient to any (reasonable) 
order of startup event firing.""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") def setUp(self, *_): @@ -348,11 +343,13 @@ def tearDown(self): def test_pebble_ready_changes_status_from_waiting_to_active(self): """Scenario: a pebble-ready event is delayed.""" # WHEN all startup hooks except pebble-ready finished - # THEN app status is "Waiting" before pebble-ready - self.assertIsInstance(self.harness.charm.unit.status, WaitingStatus) + # THEN app status is "Maintenance" before pebble-ready + self.harness.evaluate_status() + self.assertIsInstance(self.harness.charm.unit.status, MaintenanceStatus) # AND app status is "Active" after pebble-ready self.harness.container_pebble_ready("loki") + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) @k8s_resource_multipatch @@ -363,10 +360,12 @@ def test_regular_relation_departed_runs_before_pebble_ready(self): self.harness.remove_relation_unit(self.log_rel_id, "consumer-app/1") # THEN app status is "Waiting" before pebble-ready - self.assertIsInstance(self.harness.charm.unit.status, WaitingStatus) + self.harness.evaluate_status() + self.assertIsInstance(self.harness.charm.unit.status, MaintenanceStatus) # AND app status is "Active" after pebble-ready self.harness.container_pebble_ready("loki") + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) @k8s_resource_multipatch @@ -377,10 +376,12 @@ def test_regular_relation_broken_runs_before_pebble_ready(self): self.harness.remove_relation(self.log_rel_id) # THEN app status is "Waiting" before pebble-ready - self.assertIsInstance(self.harness.charm.unit.status, WaitingStatus) + self.harness.evaluate_status() + self.assertIsInstance(self.harness.charm.unit.status, MaintenanceStatus) # AND app status is "Active" after pebble-ready self.harness.container_pebble_ready("loki") + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) @@ -390,7 +391,6 @@ class TestAppRelationData(unittest.TestCase): Background: Consumer charms need to have a URL for downloading promtail. 
""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Container, "exec", new=FakeProcessVersionCheck) @@ -433,7 +433,6 @@ def test_promtail_url(self): class TestAlertRuleBlockedStatus(unittest.TestCase): """Ensure that Loki 'keeps' BlockedStatus from alert rules until another rules event.""" - @patch("charm.KubernetesServicePatch", lambda x, y: None) @k8s_resource_multipatch @patch("lightkube.core.client.GenericSyncClient") @patch.object(Container, "exec", new=FakeProcessVersionCheck) @@ -483,6 +482,7 @@ def test__alert_rule_errors_appropriately_set_state(self): hdrs=None, # type: ignore ) self._add_alerting_relation() + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) self.assertEqual( self.harness.charm.unit.status.message, @@ -491,6 +491,7 @@ def test__alert_rule_errors_appropriately_set_state(self): # Emit another config changed to make sure we stay blocked self.harness.charm.on.config_changed.emit() + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) self.assertEqual( self.harness.charm.unit.status.message, @@ -501,6 +502,7 @@ def test__alert_rule_errors_appropriately_set_state(self): self.mock_request.return_value = BytesIO(initial_bytes="success".encode()) self.harness.charm._loki_push_api_alert_rules_changed(None) + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) @k8s_resource_multipatch @@ -508,6 +510,7 @@ def test__loki_connection_errors_on_lifecycle_events_appropriately_clear(self): self.harness.charm.on.config_changed.emit() self.mock_request.side_effect = URLError(reason="fubar!") self._add_alerting_relation() + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) self.assertEqual( self.harness.charm.unit.status.message, @@ -515,7 +518,8 @@ def test__loki_connection_errors_on_lifecycle_events_appropriately_clear(self): ) # Emit another config changed to make sure we unblock - self.harness.charm.on.config_changed.emit() self.mock_request.side_effect = None self.mock_request.return_value = BytesIO(initial_bytes="success".encode()) + self.harness.charm.on.config_changed.emit() + self.harness.evaluate_status() self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) diff --git a/tests/unit/test_log_proxy_consumer.py b/tests/unit/test_log_proxy_consumer.py index eee81b031..5e38230e7 100644 --- a/tests/unit/test_log_proxy_consumer.py +++ b/tests/unit/test_log_proxy_consumer.py @@ -9,7 +9,6 @@ from tempfile import mkdtemp from unittest.mock import mock_open, patch -import ops from charms.loki_k8s.v0 import loki_push_api from charms.loki_k8s.v0.loki_push_api import LogProxyConsumer from ops.charm import CharmBase @@ -18,7 +17,6 @@ from ops.pebble import PathError from ops.testing import Harness -ops.testing.SIMULATE_CAN_CONNECT = True LOG_FILES = ["/var/log/apache2/access.log", "/var/log/alternatives.log", "/var/log/test.log"] HTTP_LISTEN_PORT = 9080