diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py
index 3bc2dd8503..9717119030 100644
--- a/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ b/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent):
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 40
+LIBPATCH = 41

 PYDEPS = ["ops>=2.0.0"]

@@ -609,7 +609,7 @@ def get_group(self, group: str) -> Optional[SecretGroup]:
 class CachedSecret:
     """Locally cache a secret.

-    The data structure is precisely re-using/simulating as in the actual Secret Storage
+    The data structure precisely reuses/simulates the one in the actual Secret Storage
     """

     KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]]

@@ -2363,7 +2363,6 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non
     def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None:
         """Delete data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app."""
         if self.secret_fields and self.deleted_label:
-
             _, normal_fields = self._process_secret_fields(
                 relation,
                 self.secret_fields,
diff --git a/lib/charms/data_platform_libs/v0/data_models.py b/lib/charms/data_platform_libs/v0/data_models.py
index a1dbb8299a..087f6f3c58 100644
--- a/lib/charms/data_platform_libs/v0/data_models.py
+++ b/lib/charms/data_platform_libs/v0/data_models.py
@@ -168,7 +168,7 @@ class MergedDataBag(ProviderDataBag, RequirerDataBag):
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 4
+LIBPATCH = 5

 PYDEPS = ["ops>=2.0.0", "pydantic>=1.10,<2"]

@@ -209,7 +209,7 @@ def validate_params(cls: Type[T]):
     """

     def decorator(
-        f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G]
+        f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G],
     ) -> Callable[[CharmBase, ActionEvent], G]:
         @wraps(f)
         def event_wrapper(self: CharmBase, event: ActionEvent):
@@ -287,7 +287,7 @@ def decorator(
             Optional[Union[UnitModel, ValidationError]],
         ],
         G,
-    ]
+    ],
 ) -> Callable[[CharmBase, RelationEvent], G]:
     @wraps(f)
     def event_wrapper(self: CharmBase, event: RelationEvent):
diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
index dfc32ddcb5..cc22f9f7fa 100644
--- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py
+++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py
@@ -157,7 +157,7 @@ def __init__(self, *args):
             self._on_dashboards_changed,
         )

-Dashboards can be retrieved the :meth:`dashboards`:
+Dashboards can be retrieved via the `dashboards` method:

 It will be returned in the format of:

@@ -175,7 +175,6 @@ def __init__(self, *args):

 The consuming charm should decompress the dashboard.
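The decompression this docstring mentions is the transport encoding used throughout the library: dashboards travel across relation data LZMA-compressed and then base64-encoded. A minimal sketch of that round trip, equivalent to the `_encode_dashboard_content`/`_decode_dashboard_content` helpers removed further down in this diff (the `cosl.LZMABase64` helper imported in their place is assumed to behave the same way):

```python
import base64
import lzma

def compress(content: str) -> str:
    # LZMA-compress, then base64-encode so the payload is a plain string
    # that can be stored in Juju relation data.
    return base64.b64encode(lzma.compress(content.encode("utf-8"))).decode("utf-8")

def decompress(encoded: str) -> str:
    return lzma.decompress(base64.b64decode(encoded.encode("utf-8"))).decode()

assert decompress(compress('{"title": "demo"}')) == '{"title": "demo"}'
```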
""" -import base64 import hashlib import json import logging @@ -187,7 +186,7 @@ def __init__(self, *args): import tempfile import uuid from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple import yaml from ops.charm import ( @@ -209,6 +208,7 @@ def __init__(self, *args): StoredState, ) from ops.model import Relation +from cosl import LZMABase64, DashboardPath40UID # The unique Charmhub library identifier, never change it LIBID = "c49eb9c7dfef40c7b6235ebd67010a3f" @@ -219,7 +219,9 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 36 +LIBPATCH = 39 + +PYDEPS = ["cosl >= 0.0.50"] logger = logging.getLogger(__name__) @@ -544,357 +546,474 @@ def _validate_relation_by_interface_and_direction( raise Exception("Unexpected RelationDirection: {}".format(expected_relation_role)) -def _encode_dashboard_content(content: Union[str, bytes]) -> str: - if isinstance(content, str): - content = bytes(content, "utf-8") +class CharmedDashboard: + """A helper class for handling dashboards on the requirer (Grafana) side.""" - return base64.b64encode(lzma.compress(content)).decode("utf-8") + @classmethod + def _convert_dashboard_fields(cls, content: str, inject_dropdowns: bool = True) -> str: + """Make sure values are present for Juju topology. + Inserts Juju topology variables and selectors into the template, as well as + a variable for Prometheus. + """ + dict_content = json.loads(content) + datasources = {} + existing_templates = False + + template_dropdowns = ( + TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS # type: ignore + if inject_dropdowns + else DATASOURCE_TEMPLATE_DROPDOWNS + ) -def _decode_dashboard_content(encoded_content: str) -> str: - return lzma.decompress(base64.b64decode(encoded_content.encode("utf-8"))).decode() + # If the dashboard has __inputs, get the names to replace them. These are stripped + # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in + # newer charms may import them directly from the marketplace + if "__inputs" in dict_content: + for field in dict_content["__inputs"]: + if "type" in field and field["type"] == "datasource": + datasources[field["name"]] = field["pluginName"].lower() + del dict_content["__inputs"] + + # If no existing template variables exist, just insert our own + if "templating" not in dict_content: + dict_content["templating"] = {"list": list(template_dropdowns)} # type: ignore + else: + # Otherwise, set a flag so we can go back later + existing_templates = True + for template_value in dict_content["templating"]["list"]: + # Build a list of `datasource_name`: `datasource_type` mappings + # The "query" field is actually "prometheus", "loki", "influxdb", etc + if "type" in template_value and template_value["type"] == "datasource": + datasources[template_value["name"]] = template_value["query"].lower() + + # Put our own variables in the template + for d in template_dropdowns: # type: ignore + if d not in dict_content["templating"]["list"]: + dict_content["templating"]["list"].insert(0, d) + + dict_content = cls._replace_template_fields(dict_content, datasources, existing_templates) + return json.dumps(dict_content) + @classmethod + def _replace_template_fields( # noqa: C901 + cls, dict_content: dict, datasources: dict, existing_templates: bool + ) -> dict: + """Make templated fields get cleaned up afterwards. 
-def _convert_dashboard_fields(content: str, inject_dropdowns: bool = True) -> str: - """Make sure values are present for Juju topology. + If existing datasource variables are present, try to substitute them. + """ + replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"} + used_replacements = [] # type: List[str] + + # If any existing datasources match types we know, or we didn't find + # any templating variables at all, template them. + if datasources or not existing_templates: + panels = dict_content.get("panels", {}) + if panels: + dict_content["panels"] = cls._template_panels( + panels, replacements, used_replacements, existing_templates, datasources + ) - Inserts Juju topology variables and selectors into the template, as well as - a variable for Prometheus. - """ - dict_content = json.loads(content) - datasources = {} - existing_templates = False - - template_dropdowns = ( - TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS # type: ignore - if inject_dropdowns - else DATASOURCE_TEMPLATE_DROPDOWNS - ) + # Find panels nested under rows + rows = dict_content.get("rows", {}) + if rows: + for row_idx, row in enumerate(rows): + if "panels" in row.keys(): + rows[row_idx]["panels"] = cls._template_panels( + row["panels"], + replacements, + used_replacements, + existing_templates, + datasources, + ) + + dict_content["rows"] = rows + + # Finally, go back and pop off the templates we stubbed out + deletions = [] + for tmpl in dict_content["templating"]["list"]: + if tmpl["name"] and tmpl["name"] in used_replacements: + deletions.append(tmpl) + + for d in deletions: + dict_content["templating"]["list"].remove(d) + + return dict_content + + @classmethod + def _template_panels( + cls, + panels: dict, + replacements: dict, + used_replacements: list, + existing_templates: bool, + datasources: dict, + ) -> dict: + """Iterate through a `panels` object and template it appropriately.""" + # Go through all the panels. If they have a datasource set, AND it's one + # that we can convert to ${lokids} or ${prometheusds}, by stripping off the + # ${} templating and comparing the name to the list we built, replace it, + # otherwise, leave it alone. 
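The strip-and-look-up step described in the comment above, shown in isolation (the datasource names here are illustrative):

```python
import re

# datasource_name -> datasource_type, built from the dashboard's templating list.
datasources = {"myprom": "prometheus", "mylogs": "loki"}
replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"}

panel = {"datasource": "${myprom}"}
# Strip `$`, `{`, `}` to recover the bare variable name...
ds = re.sub(r"(\$|\{|\})", "", panel["datasource"])
# ...then map its declared type onto the charmed datasource variable.
if ds in datasources:
    panel["datasource"] = replacements.get(datasources[ds], panel["datasource"])

assert panel["datasource"] == "${prometheusds}"
```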
+ # + for panel in panels: + if "datasource" not in panel or not panel.get("datasource"): + continue + if not existing_templates: + datasource = panel.get("datasource") + if isinstance(datasource, str): + if "loki" in datasource: + panel["datasource"] = "${lokids}" + elif "grafana" in datasource: + continue + else: + panel["datasource"] = "${prometheusds}" + elif isinstance(datasource, dict): + # In dashboards exported by Grafana 9, datasource type is dict + dstype = datasource.get("type", "") + if dstype == "loki": + panel["datasource"]["uid"] = "${lokids}" + elif dstype == "prometheus": + panel["datasource"]["uid"] = "${prometheusds}" + else: + logger.debug("Unrecognized datasource type '%s'; skipping", dstype) + continue + else: + logger.error("Unknown datasource format: skipping") + continue + else: + if isinstance(panel["datasource"], str): + if panel["datasource"].lower() in replacements.values(): + # Already a known template variable + continue + # Strip out variable characters and maybe braces + ds = re.sub(r"(\$|\{|\})", "", panel["datasource"]) + + if ds not in datasources.keys(): + # Unknown, non-templated datasource, potentially a Grafana builtin + continue + + replacement = replacements.get(datasources[ds], "") + if replacement: + used_replacements.append(ds) + panel["datasource"] = replacement or panel["datasource"] + elif isinstance(panel["datasource"], dict): + dstype = panel["datasource"].get("type", "") + if panel["datasource"].get("uid", "").lower() in replacements.values(): + # Already a known template variable + continue + # Strip out variable characters and maybe braces + ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", "")) + + if ds not in datasources.keys(): + # Unknown, non-templated datasource, potentially a Grafana builtin + continue + + replacement = replacements.get(datasources[ds], "") + if replacement: + used_replacements.append(ds) + panel["datasource"]["uid"] = replacement + else: + logger.error("Unknown datasource format: skipping") + continue + return panels - # If the dashboard has __inputs, get the names to replace them. These are stripped - # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in - # newer charms may import them directly from the marketplace - if "__inputs" in dict_content: - for field in dict_content["__inputs"]: - if "type" in field and field["type"] == "datasource": - datasources[field["name"]] = field["pluginName"].lower() - del dict_content["__inputs"] - - # If no existing template variables exist, just insert our own - if "templating" not in dict_content: - dict_content["templating"] = {"list": list(template_dropdowns)} # type: ignore - else: - # Otherwise, set a flag so we can go back later - existing_templates = True - for template_value in dict_content["templating"]["list"]: - # Build a list of `datasource_name`: `datasource_type` mappings - # The "query" field is actually "prometheus", "loki", "influxdb", etc - if "type" in template_value and template_value["type"] == "datasource": - datasources[template_value["name"]] = template_value["query"].lower() + @classmethod + def _inject_labels(cls, content: str, topology: dict, transformer: "CosTool") -> str: + """Inject Juju topology into panel expressions via CosTool. 
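In plain terms, "injecting Juju topology" scopes every panel query to the charm's own model and application. A toy illustration of the intended end result; the actual rewriting is delegated to `cos-tool`, not done with string concatenation:

```python
topology = {"juju_model": "mymodel", "juju_application": "postgresql-k8s"}
expr = "up"
# Conceptually, cos-tool adds one label matcher per topology key.
matchers = ", ".join('{}="{}"'.format(k, v) for k, v in topology.items())
scoped = "{}{{{}}}".format(expr, matchers)
print(scoped)  # up{juju_model="mymodel", juju_application="postgresql-k8s"}
```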
- # Put our own variables in the template - for d in template_dropdowns: # type: ignore - if d not in dict_content["templating"]["list"]: - dict_content["templating"]["list"].insert(0, d) + A dashboard will have a structure approximating: + { + "__inputs": [], + "templating": { + "list": [ + { + "name": "prometheusds", + "type": "prometheus" + } + ] + }, + "panels": [ + { + "foo": "bar", + "targets": [ + { + "some": "field", + "expr": "up{job="foo"}" + }, + { + "some_other": "field", + "expr": "sum(http_requests_total{instance="$foo"}[5m])} + } + ], + "datasource": "${someds}" + } + ] + } - dict_content = _replace_template_fields(dict_content, datasources, existing_templates) - return json.dumps(dict_content) + `templating` is used elsewhere in this library, but the structure is not rigid. It is + not guaranteed that a panel will actually have any targets (it could be a "spacer" with + no datasource, hence no expression). It could have only one target. It could have multiple + targets. It could have multiple targets of which only one has an `expr` to evaluate. We need + to try to handle all of these concisely. + `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general) + does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user + interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for + five years for a panel query would be unusual). -def _replace_template_fields( # noqa: C901 - dict_content: dict, datasources: dict, existing_templates: bool -) -> dict: - """Make templated fields get cleaned up afterwards. + Args: + content: dashboard content as a string + topology: a dict containing topology values + transformer: a 'CosTool' instance + Returns: + dashboard content with replaced values. + """ + dict_content = json.loads(content) - If existing datasource variables are present, try to substitute them. - """ - replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"} - used_replacements = [] # type: List[str] - - # If any existing datasources match types we know, or we didn't find - # any templating variables at all, template them. - if datasources or not existing_templates: - panels = dict_content.get("panels", {}) - if panels: - dict_content["panels"] = _template_panels( - panels, replacements, used_replacements, existing_templates, datasources - ) + if "panels" not in dict_content.keys(): + return json.dumps(dict_content) - # Find panels nested under rows - rows = dict_content.get("rows", {}) - if rows: - for row_idx, row in enumerate(rows): - if "panels" in row.keys(): - rows[row_idx]["panels"] = _template_panels( - row["panels"], - replacements, - used_replacements, - existing_templates, - datasources, - ) - - dict_content["rows"] = rows - - # Finally, go back and pop off the templates we stubbed out - deletions = [] - for tmpl in dict_content["templating"]["list"]: - if tmpl["name"] and tmpl["name"] in used_replacements: - deletions.append(tmpl) - - for d in deletions: - dict_content["templating"]["list"].remove(d) - - return dict_content - - -def _template_panels( - panels: dict, - replacements: dict, - used_replacements: list, - existing_templates: bool, - datasources: dict, -) -> dict: - """Iterate through a `panels` object and template it appropriately.""" - # Go through all the panels. 
If they have a datasource set, AND it's one - # that we can convert to ${lokids} or ${prometheusds}, by stripping off the - # ${} templating and comparing the name to the list we built, replace it, - # otherwise, leave it alone. - # - for panel in panels: - if "datasource" not in panel or not panel.get("datasource"): - continue - if not existing_templates: - datasource = panel.get("datasource") - if isinstance(datasource, str): - if "loki" in datasource: - panel["datasource"] = "${lokids}" - elif "grafana" in datasource: - continue - else: - panel["datasource"] = "${prometheusds}" - elif isinstance(datasource, dict): - # In dashboards exported by Grafana 9, datasource type is dict - dstype = datasource.get("type", "") - if dstype == "loki": - panel["datasource"]["uid"] = "${lokids}" - elif dstype == "prometheus": - panel["datasource"]["uid"] = "${prometheusds}" - else: - logger.debug("Unrecognized datasource type '%s'; skipping", dstype) - continue - else: - logger.error("Unknown datasource format: skipping") + # Go through all the panels and inject topology labels + # Panels may have more than one 'target' where the expressions live, so that must be + # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle + # expressions with range queries including variables. Exclude these. + # + # It is not a certainty that the `datasource` field will necessarily reflect the type, so + # operate on all fields. + panels = dict_content["panels"] + topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()} + + # We need to use an index so we can insert the changed element back later + for panel_idx, panel in enumerate(panels): + if not isinstance(panel, dict): continue - else: - if isinstance(panel["datasource"], str): - if panel["datasource"].lower() in replacements.values(): - # Already a known template variable - continue - # Strip out variable characters and maybe braces - ds = re.sub(r"(\$|\{|\})", "", panel["datasource"]) - if ds not in datasources.keys(): - # Unknown, non-templated datasource, potentially a Grafana builtin - continue + # Use the index to insert it back in the same location + panels[panel_idx] = cls._modify_panel(panel, topology_with_prefix, transformer) - replacement = replacements.get(datasources[ds], "") - if replacement: - used_replacements.append(ds) - panel["datasource"] = replacement or panel["datasource"] - elif isinstance(panel["datasource"], dict): - dstype = panel["datasource"].get("type", "") - if panel["datasource"].get("uid", "").lower() in replacements.values(): - # Already a known template variable - continue - # Strip out variable characters and maybe braces - ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", "")) + return json.dumps(dict_content) - if ds not in datasources.keys(): - # Unknown, non-templated datasource, potentially a Grafana builtin - continue + @classmethod + def _modify_panel(cls, panel: dict, topology: dict, transformer: "CosTool") -> dict: + """Inject Juju topology into panel expressions via CosTool. 
-            replacement = replacements.get(datasources[ds], "")
-            if replacement:
-                used_replacements.append(ds)
-                panel["datasource"]["uid"] = replacement
-        else:
-            logger.error("Unknown datasource format: skipping")
-            continue
-    return panels
+        Args:
+            panel: a dashboard panel as a dict
+            topology: a dict containing topology values
+            transformer: a 'CosTool' instance
+        Returns:
+            the panel with injected values
+        """
+        if "targets" not in panel.keys():
+            return panel

+        # Pre-compile a regular expression to grab values from inside of []
+        range_re = re.compile(r"\[(?P<value>.*?)\]")
+        # Do the same for any offsets
+        offset_re = re.compile(r"offset\s+(?P<value>-?\s*[$\w]+)")

-def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str:
-    """Inject Juju topology into panel expressions via CosTool.
+        known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"}

-    A dashboard will have a structure approximating:
-    {
-        "__inputs": [],
-        "templating": {
-            "list": [
-                {
-                    "name": "prometheusds",
-                    "type": "prometheus"
-                }
-            ]
-        },
-        "panels": [
-            {
-                "foo": "bar",
-                "targets": [
-                    {
-                        "some": "field",
-                        "expr": "up{job="foo"}"
-                    },
-                    {
-                        "some_other": "field",
-                        "expr": "sum(http_requests_total{instance="$foo"}[5m])}
-                    }
-                ],
-                "datasource": "${someds}"
-            }
-        ]
-    }
+        targets = panel["targets"]

-    `templating` is used elsewhere in this library, but the structure is not rigid. It is
-    not guaranteed that a panel will actually have any targets (it could be a "spacer" with
-    no datasource, hence no expression). It could have only one target. It could have multiple
-    targets. It could have multiple targets of which only one has an `expr` to evaluate. We need
-    to try to handle all of these concisely.
+        # We need to use an index so we can insert the changed element back later
+        for idx, target in enumerate(targets):
+            # If there's no expression, we don't need to do anything
+            if "expr" not in target.keys():
+                continue
+            expr = target["expr"]

-    `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general)
-    does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user
-    interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for
-    five years for a panel query would be unusual).
+            if "datasource" not in panel.keys():
+                continue

-    Args:
-        content: dashboard content as a string
-        topology: a dict containing topology values
-        transformer: a 'CosTool' instance
-    Returns:
-        dashboard content with replaced values.
-    """
-    dict_content = json.loads(content)
+            if isinstance(panel["datasource"], str):
+                if panel["datasource"] not in known_datasources:
+                    continue
+                querytype = known_datasources[panel["datasource"]]
+            elif isinstance(panel["datasource"], dict):
+                if panel["datasource"]["uid"] not in known_datasources:
+                    continue
+                querytype = known_datasources[panel["datasource"]["uid"]]
+            else:
+                logger.error("Unknown datasource format: skipping")
+                continue

-    if "panels" not in dict_content.keys():
-        return json.dumps(dict_content)
+            # Capture all values inside `[]` into a list which we'll iterate over later to
+            # put them back in-order. Then apply the regex again and replace everything with
+            # `[5y]` so promql/parser will take it.
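The capture-substitute-restore dance the comment above starts to describe (ranges here, offsets just below) works like this on its own:

```python
import re

range_re = re.compile(r"\[(?P<value>.*?)\]")

expr = "rate(http_requests_total[$__rate_interval])"
# Stash the Grafana-ism, then make the query parseable for cos-tool.
range_values = [m.group("value") for m in range_re.finditer(expr)]
parseable = range_re.sub("[5y]", expr)  # rate(http_requests_total[5y])

# ... cos-tool would inject label matchers into `parseable` here ...
transformed = parseable

# Put the original range selectors back, left to right, one at a time.
for i, match in enumerate(range_re.finditer(transformed)):
    transformed = transformed.replace(
        "[{}]".format(match.group("value")), "[{}]".format(range_values[i]), 1
    )
assert transformed == expr
```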
+            #
+            # Then do it again for offsets
+            range_values = [m.group("value") for m in range_re.finditer(expr)]
+            expr = range_re.sub(r"[5y]", expr)
+
+            offset_values = [m.group("value") for m in offset_re.finditer(expr)]
+            expr = offset_re.sub(r"offset 5y", expr)
+            # Retrieve the new expression (which may be unchanged if there were no label
+            # matchers in the expression, or if it was unable to be parsed, like logql). It's
+            # virtually impossible to tell from any datasource "name" in a panel what the
+            # actual type is without re-implementing a complete dashboard parser, but no
+            # harm will come from passing invalid promql -- we'll just get the original back.
+            #
+            replacement = transformer.inject_label_matchers(expr, topology, querytype)

-    # Go through all the panels and inject topology labels
-    # Panels may have more than one 'target' where the expressions live, so that must be
-    # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle
-    # expressions with range queries including variables. Exclude these.
-    #
-    # It is not a certainty that the `datasource` field will necessarily reflect the type, so
-    # operate on all fields.
-    panels = dict_content["panels"]
-    topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()}
+            if replacement == target["expr"]:
+                # promql-transform caught an error. Move on
+                continue

-    # We need to use an index so we can insert the changed element back later
-    for panel_idx, panel in enumerate(panels):
-        if not isinstance(panel, dict):
-            continue
+            # Go back and substitute values in [] which were pulled out
+            # Enumerate with an index... again. The same regex is ok, since it will still match
+            # `[(.*?)]`, which includes `[5y]`, our placeholder
+            for i, match in enumerate(range_re.finditer(replacement)):
+                # Replace one-by-one, starting from the left. We build the string back with
+                # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
+                # to one, since we are going through one-by-one through the list we saved earlier
+                # in `range_values`.
+                replacement = replacement.replace(
+                    "[{}]".format(match.group("value")),
+                    "[{}]".format(range_values[i]),
+                    1,
+                )

-        # Use the index to insert it back in the same location
-        panels[panel_idx] = _modify_panel(panel, topology_with_prefix, transformer)
+            for i, match in enumerate(offset_re.finditer(replacement)):
+                # Replace one-by-one, starting from the left. We build the string back with
+                # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
+                # to one, since we are going through one-by-one through the list we saved earlier
+                # in `range_values`.
+                replacement = replacement.replace(
+                    "offset {}".format(match.group("value")),
+                    "offset {}".format(offset_values[i]),
+                    1,
+                )

-    return json.dumps(dict_content)
+            # Use the index to insert it back in the same location
+            targets[idx]["expr"] = replacement

+        panel["targets"] = targets
+        return panel

-def _modify_panel(panel: dict, topology: dict, transformer: "CosTool") -> dict:
-    """Inject Juju topology into panel expressions via CosTool.
+    @classmethod
+    def _content_to_dashboard_object(
+        cls,
+        *,
+        charm_name,
+        content: str,
+        juju_topology: dict,
+        inject_dropdowns: bool = True,
+        dashboard_alt_uid: Optional[str] = None,
+    ) -> Dict:
+        """Helper method for keeping a consistent stored state schema for the dashboard and some metadata.
-    Args:
-        panel: a dashboard panel as a dict
-        topology: a dict containing topology values
-        transformer: a 'CosTool' instance
-    Returns:
-        the panel with injected values
-    """
-    if "targets" not in panel.keys():
-        return panel
+        Args:
+            charm_name: Charm name (although the aggregator passes the app name).
+            content: The compressed dashboard.
+            juju_topology: This is not actually used in the dashboards, but is present to provide a secondary
+                salt to ensure uniqueness in the dict keys in case individual charm units provide dashboards.
+            inject_dropdowns: Whether to auto-render topology dropdowns.
+            dashboard_alt_uid: Alternative uid used for dashboards added programmatically.
+        """
+        ret = {
+            "charm": charm_name,
+            "content": content,
+            "juju_topology": juju_topology if inject_dropdowns else {},
+            "inject_dropdowns": inject_dropdowns,
+        }

-    # Pre-compile a regular expression to grab values from inside of []
-    range_re = re.compile(r"\[(?P<value>.*?)\]")
-    # Do the same for any offsets
-    offset_re = re.compile(r"offset\s+(?P<value>-?\s*[$\w]+)")
+        if dashboard_alt_uid is not None:
+            ret["dashboard_alt_uid"] = dashboard_alt_uid

-    known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"}
+        return ret

-    targets = panel["targets"]
+    @classmethod
+    def _generate_alt_uid(cls, charm_name: str, key: str) -> str:
+        """Generate alternative uid for dashboards.

-    # We need to use an index so we can insert the changed element back later
-    for idx, target in enumerate(targets):
-        # If there's no expression, we don't need to do anything
-        if "expr" not in target.keys():
-            continue
-        expr = target["expr"]
+        Args:
+            charm_name: The name of the charm (not app; from metadata).
+            key: A string used (along with charm.meta.name) to build the hash uid.

-        if "datasource" not in panel.keys():
-            continue
+        Returns: A hash string.
+        """
+        raw_dashboard_alt_uid = "{}-{}".format(charm_name, key)
+        return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8)

-        if isinstance(panel["datasource"], str):
-            if panel["datasource"] not in known_datasources:
-                continue
-            querytype = known_datasources[panel["datasource"]]
-        elif isinstance(panel["datasource"], dict):
-            if panel["datasource"]["uid"] not in known_datasources:
-                continue
-            querytype = known_datasources[panel["datasource"]["uid"]]
+    @classmethod
+    def _replace_uid(
+        cls, *, dashboard_dict: dict, dashboard_path: Path, charm_dir: Path, charm_name: str
+    ):
+        # If we're running this from within an aggregator (such as grafana agent), then the uid was
+        # already rendered there, so we do not want to overwrite it with a uid generated from aggregator's info.
+        # We overwrite the uid only if it's not a valid "Path40" uid.
+        if not DashboardPath40UID.is_valid(original_uid := dashboard_dict.get("uid", "")):
+            rel_path = str(
+                dashboard_path.relative_to(charm_dir)
+                if dashboard_path.is_absolute()
+                else dashboard_path
+            )
+            dashboard_dict["uid"] = DashboardPath40UID.generate(charm_name, rel_path)
+            logger.debug(
+                "Processed dashboard '%s': replaced original uid '%s' with '%s'",
+                dashboard_path,
+                original_uid,
+                dashboard_dict["uid"],
+            )
         else:
-            logger.error("Unknown datasource format: skipping")
-            continue
+            logger.debug(
+                "Processed dashboard '%s': kept original uid '%s'", dashboard_path, original_uid
+            )

-        # Capture all values inside `[]` into a list which we'll iterate over later to
-        # put them back in-order. Then apply the regex again and replace everything with
-        # `[5y]` so promql/parser will take it.
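Why `_replace_uid` bothers: dashboard uids should stay stable across charm revisions (so Grafana links and references survive upgrades) while remaining unique per charm and file. A sketch of the idea behind `DashboardPath40UID.generate`; the real cosl implementation may hash differently, this only illustrates "digest of charm name plus in-charm path, within Grafana's 40-character uid limit":

```python
import hashlib

def path40_uid(charm_name: str, rel_path: str) -> str:
    # Stable for a given charm + dashboard path, independent of file content,
    # and capped at Grafana's 40-character uid limit.
    return hashlib.sha256(f"{charm_name}-{rel_path}".encode("utf-8")).hexdigest()[:40]

print(path40_uid("grafana-k8s", "src/grafana_dashboards/overview.json"))
```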
-        #
-        # Then do it again for offsets
-        range_values = [m.group("value") for m in range_re.finditer(expr)]
-        expr = range_re.sub(r"[5y]", expr)
-
-        offset_values = [m.group("value") for m in offset_re.finditer(expr)]
-        expr = offset_re.sub(r"offset 5y", expr)
-        # Retrieve the new expression (which may be unchanged if there were no label
-        # matchers in the expression, or if tt was unable to be parsed like logql. It's
-        # virtually impossible to tell from any datasource "name" in a panel what the
-        # actual type is without re-implementing a complete dashboard parser, but no
-        # harm will some from passing invalid promql -- we'll just get the original back.
-        #
-        replacement = transformer.inject_label_matchers(expr, topology, querytype)
-
-        if replacement == target["expr"]:
-            # promql-tranform caught an error. Move on
-            continue
-
-        # Go back and substitute values in [] which were pulled out
-        # Enumerate with an index... again. The same regex is ok, since it will still match
-        # `[(.*?)]`, which includes `[5y]`, our placeholder
-        for i, match in enumerate(range_re.finditer(replacement)):
-            # Replace one-by-one, starting from the left. We build the string back with
-            # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
-            # to one, since we are going through one-by-one through the list we saved earlier
-            # in `range_values`.
-            replacement = replacement.replace(
-                "[{}]".format(match.group("value")),
-                "[{}]".format(range_values[i]),
-                1,
+    @classmethod
+    def load_dashboards_from_dir(
+        cls,
+        *,
+        dashboards_path: Path,
+        charm_name: str,
+        charm_dir: Path,
+        inject_dropdowns: bool,
+        juju_topology: dict,
+        path_filter: Callable[[Path], bool] = lambda p: True,
+    ) -> dict:
+        """Load dashboard files from a directory into a mapping from "dashboard id" to a so-called "dashboard object"."""
+
+        # Path.glob uses fnmatch on the backend, which is pretty limited, so use a
+        # custom function for the filter
+        def _is_dashboard(p: Path) -> bool:
+            return (
+                p.is_file()
+                and p.name.endswith((".json", ".json.tmpl", ".tmpl"))
+                and path_filter(p)
             )

-        for i, match in enumerate(offset_re.finditer(replacement)):
-            # Replace one-by-one, starting from the left. We build the string back with
-            # `str.replace(string_to_replace, replacement_value, count)`. Limit the count
-            # to one, since we are going through one-by-one through the list we saved earlier
-            # in `range_values`.
-            replacement = replacement.replace(
-                "offset {}".format(match.group("value")),
-                "offset {}".format(offset_values[i]),
-                1,
+        dashboard_templates = {}
+
+        for path in filter(_is_dashboard, Path(dashboards_path).glob("*")):
+            try:
+                dashboard_dict = json.loads(path.read_bytes())
+            except json.JSONDecodeError as e:
+                logger.error("Failed to load dashboard '%s': %s", path, e)
+                continue
+            if type(dashboard_dict) is not dict:
+                logger.error(
+                    "Invalid dashboard '%s': expected dict, got %s", path, type(dashboard_dict)
+                )
+                continue
+
+            cls._replace_uid(
+                dashboard_dict=dashboard_dict,
+                dashboard_path=path,
+                charm_dir=charm_dir,
+                charm_name=charm_name,
             )

-            # Use the index to insert it back in the same location
-            targets[idx]["expr"] = replacement
+            id = "file:{}".format(path.stem)
+            dashboard_templates[id] = cls._content_to_dashboard_object(
+                charm_name=charm_name,
+                content=LZMABase64.compress(json.dumps(dashboard_dict)),
+                dashboard_alt_uid=cls._generate_alt_uid(charm_name, id),
+                inject_dropdowns=inject_dropdowns,
+                juju_topology=juju_topology,
+            )

-        panel["targets"] = targets
-        return panel
+        return dashboard_templates


 def _type_convert_stored(obj):
@@ -1075,16 +1194,19 @@ def add_dashboard(self, content: str, inject_dropdowns: bool = True) -> None:
         # that the stored state is there when this unit becomes leader.
         stored_dashboard_templates: Any = self._stored.dashboard_templates  # pyright: ignore

-        encoded_dashboard = _encode_dashboard_content(content)
+        encoded_dashboard = LZMABase64.compress(content)

         # Use as id the first chars of the encoded dashboard, so that
         # it is predictable across units.
         id = "prog:{}".format(encoded_dashboard[-24:-16])

-        stored_dashboard_templates[id] = self._content_to_dashboard_object(
-            encoded_dashboard, inject_dropdowns
+        stored_dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object(
+            charm_name=self._charm.meta.name,
+            content=encoded_dashboard,
+            dashboard_alt_uid=CharmedDashboard._generate_alt_uid(self._charm.meta.name, id),
+            inject_dropdowns=inject_dropdowns,
+            juju_topology=self._juju_topology,
         )
-        stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id)

         if self._charm.unit.is_leader():
             for dashboard_relation in self._charm.model.relations[self._relation_name]:
@@ -1127,38 +1249,22 @@ def _update_all_dashboards_from_dir(
                 if dashboard_id.startswith("file:"):
                     del stored_dashboard_templates[dashboard_id]

-            # Path.glob uses fnmatch on the backend, which is pretty limited, so use a
-            # custom function for the filter
-            def _is_dashboard(p: Path) -> bool:
-                return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl"))
-
-            for path in filter(_is_dashboard, Path(self._dashboards_path).glob("*")):
-                # path = Path(path)
-                id = "file:{}".format(path.stem)
-                stored_dashboard_templates[id] = self._content_to_dashboard_object(
-                    _encode_dashboard_content(path.read_bytes()), inject_dropdowns
+            stored_dashboard_templates.update(
+                CharmedDashboard.load_dashboards_from_dir(
+                    dashboards_path=Path(self._dashboards_path),
+                    charm_name=self._charm.meta.name,
+                    charm_dir=self._charm.charm_dir,
+                    inject_dropdowns=inject_dropdowns,
+                    juju_topology=self._juju_topology,
                 )
-                stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id)
-
-            self._stored.dashboard_templates = stored_dashboard_templates
+            )

             if self._charm.unit.is_leader():
                 for dashboard_relation in self._charm.model.relations[self._relation_name]:
                     self._upset_dashboards_on_relation(dashboard_relation)

-    def _generate_alt_uid(self, key: str) -> str:
- """Generate alternative uid for dashboards. - - Args: - key: A string used (along with charm.meta.name) to build the hash uid. - - Returns: A hash string. - """ - raw_dashboard_alt_uid = "{}-{}".format(self._charm.meta.name, key) - return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8) - def _reinitialize_dashboard_data(self, inject_dropdowns: bool = True) -> None: - """Triggers a reload of dashboard outside of an eventing workflow. + """Triggers a reload of dashboard outside an eventing workflow. Args: inject_dropdowns: a :bool: used to indicate whether topology dropdowns should be added @@ -1231,17 +1337,6 @@ def _upset_dashboards_on_relation(self, relation: Relation) -> None: relation.data[self._charm.app]["dashboards"] = json.dumps(stored_data) - def _content_to_dashboard_object(self, content: str, inject_dropdowns: bool = True) -> Dict: - return { - "charm": self._charm.meta.name, - "content": content, - "juju_topology": self._juju_topology if inject_dropdowns else {}, - "inject_dropdowns": inject_dropdowns, - } - - # This is not actually used in the dashboards, but is present to provide a secondary - # salt to ensure uniqueness in the dict keys in case individual charm units provide - # dashboards @property def _juju_topology(self) -> Dict: return { @@ -1306,7 +1401,7 @@ def __init__( super().__init__(charm, relation_name) self._charm = charm self._relation_name = relation_name - self._tranformer = CosTool(self._charm) + self._transformer = CosTool(self._charm) self._stored.set_default(dashboards={}) # type: ignore @@ -1436,21 +1531,21 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool: # error = None topology = template.get("juju_topology", {}) try: - content = _decode_dashboard_content(template["content"]) + content = LZMABase64.decompress(template["content"]) inject_dropdowns = template.get("inject_dropdowns", True) content = self._manage_dashboard_uid(content, template) - content = _convert_dashboard_fields(content, inject_dropdowns) + content = CharmedDashboard._convert_dashboard_fields(content, inject_dropdowns) if topology: - content = _inject_labels(content, topology, self._tranformer) + content = CharmedDashboard._inject_labels(content, topology, self._transformer) - content = _encode_dashboard_content(content) + content = LZMABase64.compress(content) except lzma.LZMAError as e: error = str(e) relation_has_invalid_dashboards = True except json.JSONDecodeError as e: error = str(e.msg) - logger.warning("Invalid JSON in Grafana dashboard: {}".format(fname)) + logger.warning("Invalid JSON in Grafana dashboard '{}': {}".format(fname, error)) continue # Prepend the relation name and ID to the dashboard ID to avoid clashes with @@ -1533,7 +1628,7 @@ def _to_external_object(self, relation_id, dashboard): "id": dashboard["original_id"], "relation_id": relation_id, "charm": dashboard["template"]["charm"], - "content": _decode_dashboard_content(dashboard["content"]), + "content": LZMABase64.decompress(dashboard["content"]), } @property @@ -1570,8 +1665,10 @@ def set_peer_data(self, key: str, data: Any) -> None: def get_peer_data(self, key: str) -> Any: """Retrieve information from the peer data bucket instead of `StoredState`.""" - data = self._charm.peers.data[self._charm.app].get(key, "") # type: ignore[attr-defined] - return json.loads(data) if data else {} + if rel := self._charm.peers: # type: ignore[attr-defined] + data = rel.data[self._charm.app].get(key, "") + return json.loads(data) if data else {} + return {} class 
GrafanaDashboardAggregator(Object): @@ -1662,8 +1759,11 @@ def _upset_dashboards_on_event(self, event: RelationEvent) -> None: return for id in dashboards: - self._stored.dashboard_templates[id] = self._content_to_dashboard_object( # type: ignore - dashboards[id], event + self._stored.dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object( # type: ignore + charm_name=event.app.name, + content=dashboards[id], + inject_dropdowns=True, + juju_topology=self._hybrid_topology(event), ) self._stored.id_mappings[event.app.name] = dashboards # type: ignore @@ -1824,7 +1924,7 @@ def _handle_reactive_dashboards(self, event: RelationEvent) -> Optional[Dict]: from jinja2 import DebugUndefined, Template - content = _encode_dashboard_content( + content = LZMABase64.compress( Template(dash, undefined=DebugUndefined).render(datasource=r"${prometheusds}") # type: ignore ) id = "prog:{}".format(content[-24:-16]) @@ -1855,32 +1955,20 @@ def _maybe_get_builtin_dashboards(self, event: RelationEvent) -> Dict: ) if dashboards_path: - - def is_dashboard(p: Path) -> bool: - return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl")) - - for path in filter(is_dashboard, Path(dashboards_path).glob("*")): - # path = Path(path) - if event.app.name in path.name: # type: ignore - id = "file:{}".format(path.stem) - builtins[id] = self._content_to_dashboard_object( - _encode_dashboard_content(path.read_bytes()), event - ) + builtins.update( + CharmedDashboard.load_dashboards_from_dir( + dashboards_path=Path(dashboards_path), + charm_name=event.app.name, + charm_dir=self._charm.charm_dir, + inject_dropdowns=True, + juju_topology=self._hybrid_topology(event), + path_filter=lambda path: event.app.name in path.name, + ) + ) return builtins - def _content_to_dashboard_object(self, content: str, event: RelationEvent) -> Dict: - return { - "charm": event.app.name, # type: ignore - "content": content, - "juju_topology": self._juju_topology(event), - "inject_dropdowns": True, - } - - # This is not actually used in the dashboards, but is present to provide a secondary - # salt to ensure uniqueness in the dict keys in case individual charm units provide - # dashboards - def _juju_topology(self, event: RelationEvent) -> Dict: + def _hybrid_topology(self, event: RelationEvent) -> Dict: return { "model": self._charm.model.name, "model_uuid": self._charm.model.uuid, @@ -1999,12 +2087,9 @@ def _get_tool_path(self) -> Optional[Path]: arch = "amd64" if arch == "x86_64" else arch res = "cos-tool-{}".format(arch) try: - path = Path(res).resolve() - path.chmod(0o777) + path = Path(res).resolve(strict=True) return path - except NotImplementedError: - logger.debug("System lacks support for chmod") - except FileNotFoundError: + except (FileNotFoundError, OSError): logger.debug('Could not locate cos-tool at: "{}"'.format(res)) return None diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py deleted file mode 100644 index 4d37a38d9f..0000000000 --- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -"""# [DEPRECATED!] KubernetesServicePatch Library. - -The `kubernetes_service_patch` library is DEPRECATED and will be removed in October 2025. - -For patching the Kubernetes service created by Juju during the deployment of a charm, -`ops.Unit.set_ports` functionality should be used instead. 
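The replacement pattern this deprecation note points to is the one `src/charm.py` adopts later in this diff: gate on Juju support for opening ports on Kubernetes, then declare the listeners directly. Roughly (port numbers here match this charm's database and Patroni API listeners):

```python
import logging

from ops import CharmBase, JujuVersion
from ops.model import ModelError

logger = logging.getLogger(__name__)

class MyCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # Declare workload ports instead of patching the k8s Service.
        if JujuVersion.from_environ().supports_open_port_on_k8s:
            try:
                self.unit.set_ports(5432, 8008)
            except ModelError:
                logger.exception("failed to open port")
```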
- -""" - -import logging -from types import MethodType -from typing import Any, List, Literal, Optional, Union - -from lightkube import ApiError, Client # pyright: ignore -from lightkube.core import exceptions -from lightkube.models.core_v1 import ServicePort, ServiceSpec -from lightkube.models.meta_v1 import ObjectMeta -from lightkube.resources.core_v1 import Service -from lightkube.types import PatchType -from ops import UpgradeCharmEvent -from ops.charm import CharmBase -from ops.framework import BoundEvent, Object - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "0042f86d0a874435adef581806cddbbb" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 13 - -ServiceType = Literal["ClusterIP", "LoadBalancer"] - - -class KubernetesServicePatch(Object): - """A utility for patching the Kubernetes service set up by Juju.""" - - def __init__( - self, - charm: CharmBase, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - *, - refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - ): - """Constructor for KubernetesServicePatch. - - Args: - charm: the charm that is instantiating the library. - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - refresh_event: an optional bound event or list of bound events which - will be observed to re-apply the patch (e.g. on port change). - The `install` and `upgrade-charm` events would be observed regardless. - """ - logger.warning( - "The ``kubernetes_service_patch v1`` library is DEPRECATED and will be removed " - "in October 2025. For patching the Kubernetes service created by Juju during " - "the deployment of a charm, ``ops.Unit.set_ports`` functionality should be used instead." - ) - super().__init__(charm, "kubernetes-service-patch") - self.charm = charm - self.service_name = service_name or self._app - # To avoid conflicts with the default Juju service, append "-lb" to the service name. - # The Juju application name is retained for the default service created by Juju. 
- if self.service_name == self._app and service_type == "LoadBalancer": - self.service_name = f"{self._app}-lb" - self.service_type = service_type - self.service = self._service_object( - ports, - self.service_name, - service_type, - additional_labels, - additional_selectors, - additional_annotations, - ) - - # Make mypy type checking happy that self._patch is a method - assert isinstance(self._patch, MethodType) - # Ensure this patch is applied during the 'install' and 'upgrade-charm' events - self.framework.observe(charm.on.install, self._patch) - self.framework.observe(charm.on.upgrade_charm, self._on_upgrade_charm) - self.framework.observe(charm.on.update_status, self._patch) - # Sometimes Juju doesn't clean-up a manually created LB service, - # so we clean it up ourselves just in case. - self.framework.observe(charm.on.remove, self._remove_service) - - # apply user defined events - if refresh_event: - if not isinstance(refresh_event, list): - refresh_event = [refresh_event] - - for evt in refresh_event: - self.framework.observe(evt, self._patch) - - def _service_object( - self, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - ) -> Service: - """Creates a valid Service representation. - - Args: - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - - Returns: - Service: A valid representation of a Kubernetes Service with the correct ports. - """ - if not service_name: - service_name = self._app - labels = {"app.kubernetes.io/name": self._app} - if additional_labels: - labels.update(additional_labels) - selector = {"app.kubernetes.io/name": self._app} - if additional_selectors: - selector.update(additional_selectors) - return Service( - apiVersion="v1", - kind="Service", - metadata=ObjectMeta( - namespace=self._namespace, - name=service_name, - labels=labels, - annotations=additional_annotations, # type: ignore[arg-type] - ), - spec=ServiceSpec( - selector=selector, - ports=ports, - type=service_type, - ), - ) - - def _patch(self, _) -> None: - """Patch the Kubernetes service created by Juju to map the correct port. - - Raises: - PatchFailed: if patching fails due to lack of permissions, or otherwise. 
- """ - try: - client = Client() # pyright: ignore - except exceptions.ConfigError as e: - logger.warning("Error creating k8s client: %s", e) - return - - try: - if self._is_patched(client): - return - if self.service_name != self._app: - if not self.service_type == "LoadBalancer": - self._delete_and_create_service(client) - else: - self._create_lb_service(client) - client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) - except ApiError as e: - if e.status.code == 403: - logger.error("Kubernetes service patch failed: `juju trust` this application.") - else: - logger.error("Kubernetes service patch failed: %s", str(e)) - else: - logger.info("Kubernetes service '%s' patched successfully", self._app) - - def _delete_and_create_service(self, client: Client): - service = client.get(Service, self._app, namespace=self._namespace) - service.metadata.name = self.service_name # type: ignore[attr-defined] - service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 - client.delete(Service, self._app, namespace=self._namespace) - client.create(service) - - def _create_lb_service(self, client: Client): - try: - client.get(Service, self.service_name, namespace=self._namespace) - except ApiError: - client.create(self.service) - - def is_patched(self) -> bool: - """Reports if the service patch has been applied. - - Returns: - bool: A boolean indicating if the service patch has been applied. - """ - client = Client() # pyright: ignore - return self._is_patched(client) - - def _is_patched(self, client: Client) -> bool: - # Get the relevant service from the cluster - try: - service = client.get(Service, name=self.service_name, namespace=self._namespace) - except ApiError as e: - if e.status.code == 404 and self.service_name != self._app: - return False - logger.error("Kubernetes service get failed: %s", str(e)) - raise - - # Construct a list of expected ports, should the patch be applied - expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] # type: ignore[attr-defined] - # Construct a list in the same manner, using the fetched service - fetched_ports = [ - (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] - ] # noqa: E501 - return expected_ports == fetched_ports - - def _on_upgrade_charm(self, event: UpgradeCharmEvent): - """Handle the upgrade charm event.""" - # If a charm author changed the service type from LB to ClusterIP across an upgrade, we need to delete the previous LB. - if self.service_type == "ClusterIP": - - client = Client() # pyright: ignore - - # Define a label selector to find services related to the app - selector: dict[str, Any] = {"app.kubernetes.io/name": self._app} - - # Check if any service of type LoadBalancer exists - services = client.list(Service, namespace=self._namespace, labels=selector) - for service in services: - if ( - not service.metadata - or not service.metadata.name - or not service.spec - or not service.spec.type - ): - logger.warning( - "Service patch: skipping resource with incomplete metadata: %s.", service - ) - continue - if service.spec.type == "LoadBalancer": - client.delete(Service, service.metadata.name, namespace=self._namespace) - logger.info(f"LoadBalancer service {service.metadata.name} deleted.") - - # Continue the upgrade flow normally - self._patch(event) - - def _remove_service(self, _): - """Remove a Kubernetes service associated with this charm. 
- - Specifically designed to delete the load balancer service created by the charm, since Juju only deletes the - default ClusterIP service and not custom services. - - Returns: - None - - Raises: - ApiError: for deletion errors, excluding when the service is not found (404 Not Found). - """ - client = Client() # pyright: ignore - - try: - client.delete(Service, self.service_name, namespace=self._namespace) - logger.info("The patched k8s service '%s' was deleted.", self.service_name) - except ApiError as e: - if e.status.code == 404: - # Service not found, so no action needed - return - # Re-raise for other statuses - raise - - @property - def _app(self) -> str: - """Name of the current Juju application. - - Returns: - str: A string containing the name of the current Juju application. - """ - return self.charm.app.name - - @property - def _namespace(self) -> str: - """The Kubernetes namespace we're running in. - - Returns: - str: A string containing the name of the current Kubernetes namespace. - """ - with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: - return f.read().strip() diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index e3d35c6f30..ca554fb2bc 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -362,7 +362,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 47 +LIBPATCH = 48 PYDEPS = ["cosl"] @@ -2364,12 +2364,9 @@ def _get_tool_path(self) -> Optional[Path]: arch = "amd64" if arch == "x86_64" else arch res = "cos-tool-{}".format(arch) try: - path = Path(res).resolve() - path.chmod(0o777) + path = Path(res).resolve(strict=True) return path - except NotImplementedError: - logger.debug("System lacks support for chmod") - except FileNotFoundError: + except (FileNotFoundError, OSError): logger.debug('Could not locate cos-tool at: "{}"'.format(res)) return None diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index cf8def11ac..a9b6deeb64 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -10,10 +10,10 @@ in real time from the Grafana dashboard the execution flow of your charm. # Quickstart -Fetch the following charm libs (and ensure the minimum version/revision numbers are satisfied): +Fetch the following charm libs: - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing # >= 1.10 - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing # >= 2.7 + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing Then edit your charm code to include: @@ -168,9 +168,10 @@ class MyCharm(CharmBase): ... 
``` -## Upgrading from `v0` +## Upgrading from `tempo_k8s.v0` -If you are upgrading from `charm_tracing` v0, you need to take the following steps (assuming you already +If you are upgrading from `tempo_k8s.v0.charm_tracing` (note that since then, the charm library moved to +`tempo_coordinator_k8s.v0.charm_tracing`), you need to take the following steps (assuming you already have the newest version of the library in your charm): 1) If you need the dependency for your tests, add the following dependency to your charm project (or, if your project had a dependency on `opentelemetry-exporter-otlp-proto-grpc` only because @@ -183,7 +184,7 @@ class MyCharm(CharmBase): For example: ``` - from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm + from charms.tempo_k8s.v0.charm_tracing import trace_charm @trace_charm( tracing_endpoint="my_tracing_endpoint", @@ -337,7 +338,7 @@ def _remove_stale_otel_sdk_packages(): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] diff --git a/lib/charms/tempo_coordinator_k8s/v0/tracing.py b/lib/charms/tempo_coordinator_k8s/v0/tracing.py index 734a4ca0b2..27144fa623 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/tracing.py @@ -110,7 +110,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 PYDEPS = ["pydantic"] @@ -951,7 +951,6 @@ def charm_tracing_config( proceed with charm tracing (with or without tls, as appropriate) Usage: - If you are using charm_tracing >= v1.9: >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") @@ -961,24 +960,6 @@ def charm_tracing_config( >>> self.tracing = TracingEndpointRequirer(...) >>> self.my_endpoint, self.cert_path = charm_tracing_config( ... self.tracing, self._cert_path) - - If you are using charm_tracing < v1.9: - >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config - >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") - >>> class MyCharm(...): - >>> _cert_path = "/path/to/cert/on/charm/container.crt" - >>> def __init__(self, ...): - >>> self.tracing = TracingEndpointRequirer(...) - >>> self._my_endpoint, self._cert_path = charm_tracing_config( - ... 
self.tracing, self._cert_path) - >>> @property - >>> def my_endpoint(self): - >>> return self._my_endpoint - >>> @property - >>> def cert_path(self): - >>> return self._cert_path - """ if not endpoint_requirer.is_ready(): return None, None diff --git a/src/charm.py b/src/charm.py index a77f2790df..74f0fb6663 100755 --- a/src/charm.py +++ b/src/charm.py @@ -34,7 +34,6 @@ from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider from charms.loki_k8s.v1.loki_push_api import LogProxyConsumer -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch from charms.postgresql_k8s.v0.postgresql import ( REQUIRED_PLUGINS, PostgreSQL, @@ -51,7 +50,7 @@ from lightkube.models.core_v1 import ServicePort, ServiceSpec from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import Endpoints, Node, Pod, Service -from ops import main +from ops import JujuVersion, main from ops.charm import ( ActionEvent, HookEvent, @@ -59,7 +58,6 @@ RelationDepartedEvent, WorkloadEvent, ) -from ops.jujuversion import JujuVersion from ops.model import ( ActiveStatus, BlockedStatus, @@ -246,9 +244,11 @@ def __init__(self, *args): relation_name="logging", ) - postgresql_db_port = ServicePort(5432, name="database") - patroni_api_port = ServicePort(8008, name="api") - self.service_patcher = KubernetesServicePatch(self, [postgresql_db_port, patroni_api_port]) + if JujuVersion.from_environ().supports_open_port_on_k8s: + try: + self.unit.set_ports(5432, 8008) + except ModelError: + logger.exception("failed to open port") self.tracing = TracingEndpointRequirer( self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL] ) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 9fb6a2160f..c4cb8e17ac 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -423,11 +423,9 @@ def get_expected_k8s_resources(application: str) -> set: f"Endpoints/patroni-{application}", f"Endpoints/patroni-{application}-config", f"Endpoints/patroni-{application}-sync", - f"Endpoints/{application}", f"Endpoints/{application}-primary", f"Endpoints/{application}-replicas", f"Service/patroni-{application}-config", - f"Service/{application}", f"Service/{application}-primary", f"Service/{application}-replicas", } diff --git a/tests/unit/test_async_replication.py b/tests/unit/test_async_replication.py index c7623a802f..d56d71d98b 100644 --- a/tests/unit/test_async_replication.py +++ b/tests/unit/test_async_replication.py @@ -20,29 +20,27 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) + harness = Harness(PostgresqlOperatorCharm) - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() + # Set up the initial relation and hooks. + harness.set_leader(True) + harness.begin() - yield harness - harness.cleanup() + yield harness + harness.cleanup() @pytest.fixture(autouse=True) def standby(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - harness.set_model_name("standby") + harness = Harness(PostgresqlOperatorCharm) + harness.set_model_name("standby") - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() + # Set up the initial relation and hooks. 
+ harness.set_leader(True) + harness.begin() - yield harness - harness.cleanup() + yield harness + harness.cleanup() @pytest.mark.parametrize("relation_name", RELATION_NAMES) diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py index 9ef8aec9ee..33c4bb81be 100644 --- a/tests/unit/test_backups.py +++ b/tests/unit/test_backups.py @@ -26,19 +26,18 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - # Mock generic sync client to avoid search to ~/.kube/config. - patcher = patch("lightkube.core.client.GenericSyncClient") - patcher.start() - - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() + # Mock generic sync client to avoid search to ~/.kube/config. + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() + + harness = Harness(PostgresqlOperatorCharm) + + # Set up the initial relation and hooks. + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() def test_stanza_name(harness): diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index c26f9eae36..6441089f8f 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -41,14 +41,25 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - harness.handle_exec("postgresql", ["locale", "-a"], result="C") + harness = Harness(PostgresqlOperatorCharm) + harness.handle_exec("postgresql", ["locale", "-a"], result="C") + + harness.add_relation(PEER, "postgresql-k8s") + harness.begin() + harness.add_relation("restart", harness.charm.app.name) + yield harness + harness.cleanup() - harness.add_relation(PEER, "postgresql-k8s") + +def test_set_ports(only_with_juju_secrets): + with ( + patch("charm.JujuVersion") as _juju_version, + patch("charm.PostgresqlOperatorCharm.unit") as _unit, + ): + harness = Harness(PostgresqlOperatorCharm) harness.begin() - harness.add_relation("restart", harness.charm.app.name) - yield harness + _unit.set_ports.assert_called_once_with(5432, 8008) + harness.cleanup() diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py index ddcbec8390..11795397d6 100644 --- a/tests/unit/test_db.py +++ b/tests/unit/test_db.py @@ -24,26 +24,25 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() - - # Define some relations. - rel_id = harness.add_relation(RELATION_NAME, "application") - harness.add_relation_unit(rel_id, "application/0") - peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) - harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1") - harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) - harness.update_relation_data( - peer_rel_id, - harness.charm.app.name, - {"cluster_initialised": "True"}, - ) - yield harness - harness.cleanup() + harness = Harness(PostgresqlOperatorCharm) + + # Set up the initial relation and hooks. + harness.set_leader(True) + harness.begin() + + # Define some relations. 
+ rel_id = harness.add_relation(RELATION_NAME, "application") + harness.add_relation_unit(rel_id, "application/0") + peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) + harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1") + harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"cluster_initialised": "True"}, + ) + yield harness + harness.cleanup() def clear_relation_data(_harness): @@ -132,57 +131,56 @@ def test_on_relation_changed(harness): def test_get_extensions(harness): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - # Test when there are no extensions in the relation databags. - rel_id = harness.model.get_relation(RELATION_NAME).id - relation = harness.model.get_relation(RELATION_NAME, rel_id) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set()) - - # Test when there are extensions in the application relation databag. - extensions = ["", "citext:public", "debversion"] - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"extensions": ",".join(extensions)}, - ) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[1].split(":")[0], extensions[2]}, + # Test when there are no extensions in the relation databags. + rel_id = harness.model.get_relation(RELATION_NAME).id + relation = harness.model.get_relation(RELATION_NAME, rel_id) + assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set()) + + # Test when there are extensions in the application relation databag. + extensions = ["", "citext:public", "debversion"] + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"extensions": ",".join(extensions)}, ) + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[1].split(":")[0], extensions[2]}, + ) - # Test when there are extensions in the unit relation databag. - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"extensions": ""}, - ) - harness.update_relation_data( - rel_id, - "application/0", - {"extensions": ",".join(extensions)}, - ) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[1].split(":")[0], extensions[2]}, + # Test when there are extensions in the unit relation databag. + with harness.hooks_disabled(): + harness.update_relation_data( + rel_id, + "application", + {"extensions": ""}, ) - - # Test when one of the plugins/extensions is enabled. - config = """options: - plugin_citext_enable: - default: true - type: boolean - plugin_debversion_enable: - default: false - type: boolean""" - harness = Harness(PostgresqlOperatorCharm, config=config) - harness.cleanup() - harness.begin() - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[2]}, + harness.update_relation_data( + rel_id, + "application/0", + {"extensions": ",".join(extensions)}, ) + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[1].split(":")[0], extensions[2]}, + ) + + # Test when one of the plugins/extensions is enabled. 
+ config = """options: + plugin_citext_enable: + default: true + type: boolean + plugin_debversion_enable: + default: false + type: boolean""" + harness = Harness(PostgresqlOperatorCharm, config=config) + harness.cleanup() + harness.begin() + assert harness.charm.legacy_db_relation._get_extensions(relation) == ( + [extensions[1], extensions[2]], + {extensions[2]}, + ) def test_set_up_relation(harness): diff --git a/tests/unit/test_patroni.py b/tests/unit/test_patroni.py index e000f2bef5..408b0367a1 100644 --- a/tests/unit/test_patroni.py +++ b/tests/unit/test_patroni.py @@ -19,34 +19,32 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - harness.begin() - yield harness - harness.cleanup() + harness = Harness(PostgresqlOperatorCharm) + harness.begin() + yield harness + harness.cleanup() @pytest.fixture(autouse=True) def patroni(harness): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - # Setup Patroni wrapper. - patroni = Patroni( - harness.charm, - "postgresql-k8s-0", - ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"], - "postgresql-k8s-primary.dev.svc.cluster.local", - "test-model", - STORAGE_PATH, - "superuser-password", - "replication-password", - "rewind-password", - False, - "patroni-password", - ) - root = harness.get_filesystem_root("postgresql") - (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True) + # Setup Patroni wrapper. + patroni = Patroni( + harness.charm, + "postgresql-k8s-0", + ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"], + "postgresql-k8s-primary.dev.svc.cluster.local", + "test-model", + STORAGE_PATH, + "superuser-password", + "replication-password", + "rewind-password", + False, + "patroni-password", + ) + root = harness.get_filesystem_root("postgresql") + (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True) - yield patroni + yield patroni # This method will be used by the mock to replace requests.get diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py index ed6665d9d6..565e2a2c21 100644 --- a/tests/unit/test_postgresql.py +++ b/tests/unit/test_postgresql.py @@ -17,15 +17,14 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) + harness = Harness(PostgresqlOperatorCharm) - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() + # Set up the initial relation and hooks. + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() def test_create_database(harness): diff --git a/tests/unit/test_postgresql_provider.py b/tests/unit/test_postgresql_provider.py index e56b392387..314eb39f9c 100644 --- a/tests/unit/test_postgresql_provider.py +++ b/tests/unit/test_postgresql_provider.py @@ -25,25 +25,24 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() - - # Define some relations. 
- rel_id = harness.add_relation(RELATION_NAME, "application") - harness.add_relation_unit(rel_id, "application/0") - peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) - harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) - harness.update_relation_data( - peer_rel_id, - harness.charm.app.name, - {"cluster_initialised": "True"}, - ) - yield harness - harness.cleanup() + harness = Harness(PostgresqlOperatorCharm) + + # Set up the initial relation and hooks. + harness.set_leader(True) + harness.begin() + + # Define some relations. + rel_id = harness.add_relation(RELATION_NAME, "application") + harness.add_relation_unit(rel_id, "application/0") + peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) + harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) + harness.update_relation_data( + peer_rel_id, + harness.charm.app.name, + {"cluster_initialised": "True"}, + ) + yield harness + harness.cleanup() def request_database(_harness): diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py index e5c4a3f532..afccae24e8 100644 --- a/tests/unit/test_postgresql_tls.py +++ b/tests/unit/test_postgresql_tls.py @@ -17,15 +17,14 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() + harness = Harness(PostgresqlOperatorCharm) + + # Set up the initial relation and hooks. + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() def delete_secrets(_harness): diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 8035a6dd39..33623048fe 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -20,23 +20,20 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - """Set up the test.""" - patcher = patch("lightkube.core.client.GenericSyncClient") - patcher.start() - harness = Harness(PostgresqlOperatorCharm) - harness.begin() - upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s") - peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s") - for rel_id in (upgrade_relation_id, peer_relation_id): - harness.add_relation_unit(rel_id, "postgresql-k8s/1") - harness.add_relation("restart", harness.charm.app.name) - with harness.hooks_disabled(): - harness.update_relation_data( - upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"} - ) - yield harness - harness.cleanup() + """Set up the test.""" + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() + harness = Harness(PostgresqlOperatorCharm) + harness.begin() + upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s") + peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s") + for rel_id in (upgrade_relation_id, peer_relation_id): + harness.add_relation_unit(rel_id, "postgresql-k8s/1") + harness.add_relation("restart", harness.charm.app.name) + with harness.hooks_disabled(): + harness.update_relation_data(upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"}) + yield harness + harness.cleanup() def test_is_no_sync_member(harness):
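Taken together, the `src/charm.py` hunk and the test churn above implement a single change: the deprecated `KubernetesServicePatch` is dropped in favour of Juju-native port opening. A minimal sketch of the charm-side pattern, assuming a controller recent enough to support `set_ports` on Kubernetes (the class name is illustrative; ports 5432 and 8008 are the PostgreSQL and Patroni API ports from the diff):

```
import logging

from ops import CharmBase, JujuVersion, main
from ops.model import ModelError

logger = logging.getLogger(__name__)


class PortOpeningCharm(CharmBase):
    """Sketch: open workload ports natively instead of patching the K8s Service."""

    def __init__(self, *args):
        super().__init__(*args)
        # Recent Juju controllers can open ports on K8s sidecar charms directly,
        # so gate on the version instead of patching the Service resource.
        if JujuVersion.from_environ().supports_open_port_on_k8s:
            try:
                # 5432: PostgreSQL, 8008: Patroni REST API (ports from the diff).
                self.unit.set_ports(5432, 8008)
            except ModelError:
                # set_ports goes through the controller and may fail transiently;
                # log the failure instead of erroring the hook.
                logger.exception("failed to open port")


if __name__ == "__main__":
    main(PortOpeningCharm)
```

Gating on `supports_open_port_on_k8s` keeps the charm deployable on older controllers, where opening ports on Kubernetes is not available, and is consistent with the integration helper above no longer expecting a charm-managed `Service/{application}` resource.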
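On the test side, the same removal explains the large but mechanical fixture diffs: with `KubernetesServicePatch` gone from `charm.py`, the `with patch("charm.KubernetesServicePatch", lambda x, y: None):` wrapper is dead code, and every harness fixture simply de-indents one level. The only new behaviour to verify is the port-opening call itself, which needs just two mocks, roughly as the new `test_set_ports` above does (a sketch; the `only_with_juju_secrets` fixture in the real test comes from this repo's conftest and is omitted here):

```
from unittest.mock import patch

from ops.testing import Harness

from charm import PostgresqlOperatorCharm


def test_set_ports():
    with (
        # Replace the version check so the supports_open_port_on_k8s branch
        # is taken regardless of the local JUJU_VERSION (a MagicMock
        # attribute is truthy)...
        patch("charm.JujuVersion") as _juju_version,
        # ...and replace the unit so set_ports() can be asserted on.
        patch("charm.PostgresqlOperatorCharm.unit") as _unit,
    ):
        harness = Harness(PostgresqlOperatorCharm)
        harness.begin()  # instantiates the charm, which calls set_ports()
        _unit.set_ports.assert_called_once_with(5432, 8008)
        harness.cleanup()
```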