diff --git a/src/dockerflow/checks/__init__.py b/src/dockerflow/checks/__init__.py index 8df8c78..1685f47 100644 --- a/src/dockerflow/checks/__init__.py +++ b/src/dockerflow/checks/__init__.py @@ -16,3 +16,11 @@ Warning, level_to_text, ) +from .registry import ( # noqa + clear_checks, + get_checks, + register, + register_partial, + run_checks, + run_checks_async, +) diff --git a/src/dockerflow/checks/registry.py b/src/dockerflow/checks/registry.py new file mode 100644 index 0000000..6670104 --- /dev/null +++ b/src/dockerflow/checks/registry.py @@ -0,0 +1,213 @@ +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, you can obtain one at http://mozilla.org/MPL/2.0/. +import asyncio +import functools +import inspect +import logging +from dataclasses import dataclass +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple + +from .messages import CheckMessage, level_to_text + +logger = logging.getLogger(__name__) + +CheckFn = Callable[..., List[CheckMessage]] + +_REGISTERED_CHECKS = {} + + +def _iscoroutinefunction_or_partial(obj): + """ + Determine if the provided object is a coroutine function or a partial function + that wraps a coroutine function. + + This function should be removed when we drop support for Python 3.7, as this is + handled directly by `inspect.iscoroutinefunction` in Python 3.8. + """ + while isinstance(obj, functools.partial): + obj = obj.func + return inspect.iscoroutinefunction(obj) + + +def register(func=None, name=None): + """ + Register a check callback to be executed from + the heartbeat endpoint. + """ + if func is None: + return functools.partial(register, name=name) + + if name is None: + name = func.__name__ + + logger.debug("Register Dockerflow check %s", name) + + if _iscoroutinefunction_or_partial(func): + + @functools.wraps(func) + async def decorated_function_asyc(*args, **kwargs): + logger.debug("Called Dockerflow check %s", name) + return await func(*args, **kwargs) + + _REGISTERED_CHECKS[name] = decorated_function_asyc + return decorated_function_asyc + + @functools.wraps(func) + def decorated_function(*args, **kwargs): + logger.debug("Called Dockerflow check %s", name) + return func(*args, **kwargs) + + _REGISTERED_CHECKS[name] = decorated_function + return decorated_function + + +def register_partial(func, *args, name=None): + """ + Registers a given check callback that will be called with the provided + arguments using `functools.partial()`. For example: + + .. code-block:: python + + dockerflow.register_partial(check_redis_connected, redis) + + """ + if name is None: + name = func.__name__ + + logger.debug("Register Dockerflow check %s with partially applied arguments" % name) + partial = functools.wraps(func)(functools.partial(func, *args)) + return register(func=partial, name=name) + + +def get_checks(): + return _REGISTERED_CHECKS + + +def clear_checks(): + global _REGISTERED_CHECKS + _REGISTERED_CHECKS = dict() + + +@dataclass +class ChecksResults: + """ + Represents the results of running checks. + + This data class holds the results of running a collection of checks. It includes + details about each check's outcome, their statuses, and the overall result level. + + :param details: A dictionary containing detailed information about each check's + outcome, with check names as keys and dictionaries of details as values. 
+ :type details: Dict[str, Dict[str, Any]] + + :param statuses: A dictionary containing the status of each check, with check names + as keys and statuses as values (e.g., 'pass', 'fail', 'warning'). + :type statuses: Dict[str, str] + + :param level: An integer representing the overall result level of the checks + :type level: int + """ + + details: Dict[str, Dict[str, Any]] + statuses: Dict[str, str] + level: int + + +async def _run_check_async(check): + name, check_fn = check + if _iscoroutinefunction_or_partial(check_fn): + errors = await check_fn() + else: + loop = asyncio.get_event_loop() + errors = await loop.run_in_executor(None, check_fn) + + return (name, errors) + + +async def run_checks_async( + checks: Iterable[Tuple[str, CheckFn]], + silenced_check_ids: Optional[Iterable[str]] = None, +) -> ChecksResults: + """ + Run checks concurrently and return the results. + + Executes a collection of checks concurrently, supporting both synchronous and + asynchronous checks. The results include the outcome of each check and can be + further processed. + + :param checks: An iterable of tuples where each tuple contains a check name and a + check function. + :type checks: Iterable[Tuple[str, CheckFn]] + + :param silenced_check_ids: A list of check IDs that should be omitted from the + results. + :type silenced_check_ids: List[str] + + :return: An instance of ChecksResults containing detailed information about each + check's outcome, their statuses, and the overall result level. + :rtype: ChecksResults + """ + if silenced_check_ids is None: + silenced_check_ids = [] + + tasks = (_run_check_async(check) for check in checks) + results = await asyncio.gather(*tasks) + return _build_results_payload(results, silenced_check_ids) + + +def run_checks( + checks: Iterable[Tuple[str, CheckFn]], + silenced_check_ids: Optional[Iterable[str]] = None, +) -> ChecksResults: + """ + Run checks synchronously and return the results. + + Executes a collection of checks and returns the results. The results include the + outcome of each check and can be further processed. + + :param checks: An iterable of tuples where each tuple contains a check name and a + check function. + :type checks: Iterable[Tuple[str, CheckFn]] + + :param silenced_check_ids: A list of check IDs that should be omitted from the + results. + :type silenced_check_ids: List[str] + + :return: An instance of ChecksResults containing detailed information about each + check's outcome, their statuses, and the overall result level. + :rtype: ChecksResults + """ + if silenced_check_ids is None: + silenced_check_ids = [] + results = [(name, check()) for name, check in checks] + return _build_results_payload(results, silenced_check_ids) + + +def _build_results_payload( + checks_results: Iterable[Tuple[str, Iterable[CheckMessage]]], + silenced_check_ids, +): + details = {} + statuses = {} + max_level = 0 + + for name, errors in checks_results: + # Log check results with appropriate level. 
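+        # Note: messages from silenced checks are still logged here; the
+        # silenced_check_ids filter below only removes them from the returned payload.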
+ for error in errors: + logger.log(error.level, "%s: %s", error.id, error.msg) + + errors = [e for e in errors if e.id not in silenced_check_ids] + level = max([0] + [e.level for e in errors]) + + detail = { + "status": level_to_text(level), + "level": level, + "messages": {e.id: e.msg for e in errors}, + } + statuses[name] = level_to_text(level) + max_level = max(max_level, level) + if level > 0: + details[name] = detail + + return ChecksResults(statuses=statuses, details=details, level=max_level) diff --git a/src/dockerflow/django/checks.py b/src/dockerflow/django/checks.py index 6b8e821..3b9639a 100644 --- a/src/dockerflow/django/checks.py +++ b/src/dockerflow/django/checks.py @@ -11,18 +11,6 @@ from .. import health -def level_to_text(level): - statuses = { - 0: "ok", - checks.messages.DEBUG: "debug", - checks.messages.INFO: "info", - checks.messages.WARNING: "warning", - checks.messages.ERROR: "error", - checks.messages.CRITICAL: "critical", - } - return statuses.get(level, "unknown") - - def check_database_connected(app_configs, **kwargs): """ A Django check to see if connecting to the configured default @@ -119,7 +107,6 @@ def register(): [ "dockerflow.django.checks.check_database_connected", "dockerflow.django.checks.check_migrations_applied", - # 'dockerflow.django.checks.check_redis_connected', ], ) for check_path in check_paths: diff --git a/src/dockerflow/django/views.py b/src/dockerflow/django/views.py index 5164bc2..cc5d772 100644 --- a/src/dockerflow/django/views.py +++ b/src/dockerflow/django/views.py @@ -4,11 +4,12 @@ import logging from django.conf import settings -from django.core import checks +from django.core.checks.registry import registry as django_check_registry from django.http import HttpResponse, HttpResponseNotFound, JsonResponse from django.utils.module_loading import import_string -from .checks import level_to_text +from dockerflow import checks + from .signals import heartbeat_failed, heartbeat_passed version_callback = getattr( @@ -47,44 +48,25 @@ def heartbeat(request): Any check that returns a warning or worse (error, critical) will return a 500 response. 
""" - all_checks = checks.registry.registry.get_checks( - include_deployment_checks=not settings.DEBUG + checks_to_run = ( + (check.__name__, lambda: check(app_configs=None)) + for check in django_check_registry.get_checks( + include_deployment_checks=not settings.DEBUG + ) ) - - details = {} - statuses = {} - level = 0 - - for check in all_checks: - check_level, check_errors = heartbeat_check_detail(check) - level_text = level_to_text(check_level) - statuses[check.__name__] = level_text - level = max(level, check_level) - if level > 0: - for error in check_errors: - logger.log(error.level, "%s: %s", error.id, error.msg) - details[check.__name__] = { - "status": level_text, - "level": level, - "messages": {e.id: e.msg for e in check_errors}, - } - - if level < checks.messages.ERROR: + check_results = checks.run_checks( + checks_to_run, + silenced_check_ids=settings.SILENCED_SYSTEM_CHECKS, + ) + if check_results.level < checks.ERROR: status_code = 200 - heartbeat_passed.send(sender=heartbeat, level=level) + heartbeat_passed.send(sender=heartbeat, level=check_results.level) else: status_code = 500 - heartbeat_failed.send(sender=heartbeat, level=level) + heartbeat_failed.send(sender=heartbeat, level=check_results.level) - payload = {"status": level_to_text(level)} + payload = {"status": checks.level_to_text(check_results.level)} if settings.DEBUG: - payload["checks"] = statuses - payload["details"] = details + payload["checks"] = check_results.statuses + payload["details"] = check_results.details return JsonResponse(payload, status=status_code) - - -def heartbeat_check_detail(check): - errors = check(app_configs=None) - errors = list(filter(lambda e: e.id not in settings.SILENCED_SYSTEM_CHECKS, errors)) - level = max([0] + [e.level for e in errors]) - return level, errors diff --git a/src/dockerflow/flask/app.py b/src/dockerflow/flask/app.py index 0e027ba..dd16a83 100644 --- a/src/dockerflow/flask/app.py +++ b/src/dockerflow/flask/app.py @@ -1,18 +1,23 @@ # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, you can obtain one at http://mozilla.org/MPL/2.0/. -import functools import logging import os import time import uuid -from collections import OrderedDict +import warnings import flask from werkzeug.exceptions import InternalServerError +from dockerflow import checks + from .. import version -from . import checks +from .checks import ( + check_database_connected, + check_migrations_applied, + check_redis_connected, +) from .signals import heartbeat_failed, heartbeat_passed try: @@ -123,9 +128,6 @@ def __init__( # without pre-configuration. See docs for how to set it up. self.summary_logger = logging.getLogger("request.summary") - # An ordered dictionary for storing custom Dockerflow checks in. - self.checks = OrderedDict() - # A list of IDs of custom Dockerflow checks to ignore in case they # show up. self.silenced_checks = silenced_checks or [] @@ -140,20 +142,11 @@ def __init__( self.init_app(app) # Initialize the built-in checks. if db: - self.init_check(checks.check_database_connected, db) + checks.register_partial(check_database_connected, db) if redis: - self.init_check(checks.check_redis_connected, redis) + checks.register_partial(check_redis_connected, redis) if migrate: - self.init_check(checks.check_migrations_applied, migrate) - - def init_check(self, check, obj): - """ - Adds a given check callback with the provided object to the list - of checks. 
Useful for built-ins but also advanced custom checks. - """ - self.logger.info("Adding extension check %s" % check.__name__) - check = functools.wraps(check)(functools.partial(check, obj)) - self.check(func=check) + checks.register_partial(check_migrations_applied, migrate) def init_app(self, app): """ @@ -303,11 +296,6 @@ def _lbheartbeat_view(self): """ return "", 200 - def _heartbeat_check_detail(self, check): - errors = list(filter(lambda e: e.id not in self.silenced_checks, check())) - level = max([0] + [e.level for e in errors]) - return level, errors - def _heartbeat_view(self): """ Runs all the registered checks and returns a JSON response with either @@ -316,40 +304,28 @@ def _heartbeat_view(self): Any check that returns a warning or worse (error, critical) will return a 500 response. """ - details = {} - statuses = {} - level = 0 - - for name, check in self.checks.items(): - check_level, check_errors = self._heartbeat_check_detail(check) - level_text = checks.level_to_text(check_level) - statuses[name] = level_text - level = max(level, check_level) - if check_level > 0: - for error in check_errors: - self.logger.log(error.level, "%s: %s", error.id, error.msg) - details[name] = { - "status": level_text, - "level": level, - "messages": {e.id: e.msg for e in check_errors}, - } + + check_results = checks.run_checks( + checks.get_checks().items(), + silenced_check_ids=self.silenced_checks, + ) payload = { - "status": checks.level_to_text(level), - "checks": statuses, - "details": details, + "status": checks.level_to_text(check_results.level), + "checks": check_results.statuses, + "details": check_results.details, } def render(status_code): return flask.make_response(flask.jsonify(payload), status_code) - if level < checks.ERROR: + if check_results.level < checks.ERROR: status_code = 200 - heartbeat_passed.send(self, level=level) + heartbeat_passed.send(self, level=check_results.level) return render(status_code) else: status_code = 500 - heartbeat_failed.send(self, level=level) + heartbeat_failed.send(self, level=check_results.level) raise HeartbeatFailure(response=render(status_code)) def version_callback(self, func): @@ -378,41 +354,29 @@ def my_version(root): """ self._version_callback = func - def check(self, func=None, name=None): + @property + def checks(self): """ - A decorator to register a new Dockerflow check to be run - when the /__heartbeat__ endpoint is called., e.g.:: - - from dockerflow.flask import checks - - @dockerflow.check - def storage_reachable(): - try: - acme.storage.ping() - except SlowConnectionException as exc: - return [checks.Warning(exc.msg, id='acme.health.0002')] - except StorageException as exc: - return [checks.Error(exc.msg, id='acme.health.0001')] - - or using a custom name:: - - @dockerflow.check(name='acme-storage-check) - def storage_reachable(): - # ... - + Backwards compatibility alias. """ - if func is None: - return functools.partial(self.check, name=name) - - if name is None: - name = func.__name__ - - self.logger.info("Registered Dockerflow check %s", name) + message = ( + "`dockerflow.checks` is deprecated, use `checks.get_checks()` instead." + ) + warnings.warn(message, DeprecationWarning) + return checks.get_checks() - @functools.wraps(func) - def decorated_function(*args, **kwargs): - self.logger.info("Called Dockerflow check %s", name) - return func(*args, **kwargs) + def init_check(self, check, obj): + """ + Backwards compatibility method. 
+ """ + message = "`dockerflow.init_check()` is deprecated, use `checks.register_partial()` instead." + warnings.warn(message, DeprecationWarning) + return checks.register_partial(check, obj) - self.checks[name] = decorated_function - return decorated_function + def check(self, func=None, name=None): + """ + Backwards compatibility method. + """ + message = "`dockerflow.check()` is deprecated, use `checks.register()` instead." + warnings.warn(message, DeprecationWarning) + return checks.register(func, name) diff --git a/src/dockerflow/sanic/app.py b/src/dockerflow/sanic/app.py index 7016963..cffc19e 100644 --- a/src/dockerflow/sanic/app.py +++ b/src/dockerflow/sanic/app.py @@ -2,17 +2,18 @@ # License, v. 2.0. If a copy of the MPL was not distributed with this # file, you can obtain one at http://mozilla.org/MPL/2.0/. -import functools import logging import time import uuid -from collections import OrderedDict +import warnings from inspect import isawaitable from sanic import response +from dockerflow import checks + from .. import version -from . import checks +from .checks import check_redis_connected class Dockerflow(object): @@ -94,9 +95,6 @@ def __init__( # without pre-configuration. See docs for how to set it up. self.summary_logger = logging.getLogger("request.summary") - # An ordered dictionary for storing custom Dockerflow checks in. - self.checks = OrderedDict() - # A list of IDs of custom Dockerflow checks to ignore in case they # show up. self.silenced_checks = silenced_checks or [] @@ -111,16 +109,7 @@ def __init__( self.init_app(app) # Initialize the built-in checks. if redis is not None: - self.init_check(checks.check_redis_connected, redis) - - def init_check(self, check, obj): - """ - Adds a given check callback with the provided object to the list - of checks. Useful for built-ins but also advanced custom checks. - """ - self.logger.info("Adding extension check %s" % check.__name__) - partial = functools.wraps(check)(functools.partial(check, obj)) - self.check(func=partial) + checks.register_partial(check_redis_connected, redis) def init_app(self, app): """ @@ -157,7 +146,7 @@ def _exception_handler(self, request, exception): """ extra = self.summary_extra(request) extra["errno"] = 500 - self.summary_logger.error(str(exception), extra=extra) + self.summary_logger.error(str(exception), extra=extra, exc_info=exception) request.ctx.logged = True def summary_extra(self, request): @@ -208,14 +197,6 @@ async def _lbheartbeat_view(self, request): """ return response.raw(b"", 200) - async def _heartbeat_check_detail(self, check): - result = check() - if isawaitable(result): - result = await result - errors = [e for e in result if e.id not in self.silenced_checks] - level = max([0] + [e.level for e in errors]) - return level, errors - async def _heartbeat_view(self, request): """ Runs all the registered checks and returns a JSON response with either @@ -224,31 +205,19 @@ async def _heartbeat_view(self, request): Any check that returns a warning or worse (error, critical) will return a 500 response. 
""" - details = {} - statuses = {} - level = 0 - - for name, check in self.checks.items(): - check_level, check_errors = await self._heartbeat_check_detail(check) - level_text = checks.level_to_text(check_level) - statuses[name] = level_text - level = max(level, check_level) - if check_level > 0: - for error in check_errors: - self.logger.log(error.level, "%s: %s", error.id, error.msg) - details[name] = { - "status": level_text, - "level": check_level, - "messages": {e.id: e.msg for e in check_errors}, - } + + check_results = await checks.run_checks_async( + checks.get_checks().items(), + silenced_check_ids=self.silenced_checks, + ) payload = { - "status": checks.level_to_text(level), - "checks": statuses, - "details": details, + "status": checks.level_to_text(check_results.level), + "checks": check_results.statuses, + "details": check_results.details, } - if level < checks.ERROR: + if check_results.level < checks.ERROR: status_code = 200 else: status_code = 500 @@ -286,47 +255,29 @@ async def my_version(root): """ self._version_callback = func - def check(self, func=None, name=None): + @property + def checks(self): """ - A decorator to register a new Dockerflow check to be run - when the /__heartbeat__ endpoint is called., e.g.:: - - from dockerflow.sanic import checks - - @dockerflow.check - async def storage_reachable(): - try: - acme.storage.ping() - except SlowConnectionException as exc: - return [checks.Warning(exc.msg, id='acme.health.0002')] - except StorageException as exc: - return [checks.Error(exc.msg, id='acme.health.0001')] - - also works without async:: - - @dockerflow.check - def storage_reachable(): - # ... - - or using a custom name:: - - @dockerflow.check(name='acme-storage-check') - async def storage_reachable(): - # ... - + Backwards compatibility alias. """ - if func is None: - return functools.partial(self.check, name=name) - - if name is None: - name = func.__name__ - - self.logger.info("Registered Dockerflow check %s", name) + message = ( + "`dockerflow.checks` is deprecated, use `checks.get_checks()` instead." + ) + warnings.warn(message, DeprecationWarning) + return checks.get_checks() - @functools.wraps(func) - def decorated_function(*args, **kwargs): - self.logger.info("Called Dockerflow check %s", name) - return func(*args, **kwargs) + def init_check(self, check, obj): + """ + Backwards compatibility method. + """ + message = "`dockerflow.init_check()` is deprecated, use `checks.register_partial()` instead." + warnings.warn(message, DeprecationWarning) + return checks.register_partial(check, obj) - self.checks[name] = decorated_function - return decorated_function + def check(self, func=None, name=None): + """ + Backwards compatibility method. + """ + message = "`dockerflow.check()` is deprecated, use `checks.register()` instead." + warnings.warn(message, DeprecationWarning) + return checks.register(func, name) diff --git a/tests/conftest.py b/tests/conftest.py index f50a993..ec3ae02 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,6 +3,8 @@ # file, you can obtain one at http://mozilla.org/MPL/2.0/. 
import pytest +import dockerflow.checks.registry + @pytest.fixture def version_content(): @@ -15,3 +17,9 @@ def version_content(): "commit": "", "build": "uri to CI build job", } + + +@pytest.fixture(autouse=True) +def clear_checks(): + yield + dockerflow.checks.registry.clear_checks() diff --git a/tests/core/test_checks.py b/tests/core/test_checks.py new file mode 100644 index 0000000..17eebb3 --- /dev/null +++ b/tests/core/test_checks.py @@ -0,0 +1,109 @@ +from dockerflow import checks + + +def test_run_checks(): + check_fns = ( + ("returns_error", lambda: [checks.Error("my error message", id="my.error")]), + ) + results = checks.run_checks(checks=check_fns) + assert results.level == checks.ERROR + assert results.statuses == {"returns_error": "error"} + assert results.details == { + "returns_error": { + "level": checks.ERROR, + "messages": {"my.error": "my error message"}, + "status": "error", + } + } + + +def test_run_multiple_checks(): + check_fns = ( + ( + "returns_error", + lambda: [checks.Error("my error message", id="my.error")], + ), + ( + "returns_warning", + lambda: [checks.Warning("my warning message", id="my.warning")], + ), + ) + results = checks.run_checks(checks=check_fns) + assert results.level == checks.ERROR + assert results.statuses == {"returns_error": "error", "returns_warning": "warning"} + assert results.details == { + "returns_error": { + "level": checks.ERROR, + "messages": {"my.error": "my error message"}, + "status": "error", + }, + "returns_warning": { + "level": 30, + "messages": {"my.warning": "my warning message"}, + "status": "warning", + }, + } + + +def test_silenced_checks(): + check_fns = ( + ( + "returns_error", + lambda: [checks.Error("my error message", id="my.error")], + ), + ( + "returns_warning", + lambda: [checks.Warning("my warning message", id="my.warning")], + ), + ) + results = checks.run_checks(checks=check_fns, silenced_check_ids=["my.error"]) + assert results.details == { + "returns_warning": { + "level": checks.WARNING, + "messages": { + "my.warning": "my warning message", + }, + "status": "warning", + } + } + + +def test_checks_returns_multiple_messages(): + check_fns = ( + ( + "returns_messages", + lambda: [ + checks.Error("my error message", id="my.error"), + checks.Warning("my warning message", id="my.warning"), + ], + ), + ( + "returns_more_messages", + lambda: [ + checks.Warning("another warning message", id="another.warning"), + ], + ), + ) + results = checks.run_checks(checks=check_fns) + assert results.level == checks.ERROR + assert results.statuses == { + "returns_messages": "error", + "returns_more_messages": "warning", + } + assert results.details == { + "returns_messages": { + "level": checks.ERROR, + "messages": { + "my.warning": "my warning message", + "my.error": "my error message", + }, + "status": "error", + }, + "returns_more_messages": { + "level": checks.WARNING, + "messages": { + "another.warning": "another warning message", + }, + "status": "warning", + }, + } diff --git a/tests/core/test_logging.py b/tests/core/test_logging.py index 1251c3d..124594c 100644 --- a/tests/core/test_logging.py +++ b/tests/core/test_logging.py @@ -6,11 +6,20 @@ import logging.config import os import textwrap +from importlib import reload import jsonschema +import pytest from dockerflow.logging import JsonLogFormatter + +@pytest.fixture() +def reset_logging(): + logging.shutdown() + reload(logging) + + logger_name = "tests" formatter = JsonLogFormatter(logger_name=logger_name) @@ -22,9 +31,8 @@ def assert_records(records): return details 
-def test_initialization_from_ini(caplog, tmpdir): - ini_content = textwrap.dedent( - """ +def test_initialization_from_ini(reset_logging, caplog, tmpdir): + ini_content = textwrap.dedent(""" [loggers] keys = root @@ -46,8 +54,7 @@ class = StreamHandler [formatter_json] class = dockerflow.logging.JsonLogFormatter - """ - ) + """) ini_file = tmpdir.join("logging.ini") ini_file.write(ini_content) logging.config.fileConfig(str(ini_file)) @@ -147,8 +154,7 @@ def test_ignore_json_message(caplog): # https://mana.mozilla.org/wiki/pages/viewpage.action?pageId=42895640 -JSON_LOGGING_SCHEMA = json.loads( - """ +JSON_LOGGING_SCHEMA = json.loads(""" { "type":"object", "required":["Timestamp"], @@ -220,7 +226,4 @@ def test_ignore_json_message(caplog): } } } -""".replace( - "\\", "\\\\" - ) -) # HACK: Fix escaping for easy copy/paste +""".replace("\\", "\\\\")) # HACK: Fix escaping for easy copy/paste diff --git a/tests/django/settings.py b/tests/django/settings.py index 341b3f3..55e80c7 100644 --- a/tests/django/settings.py +++ b/tests/django/settings.py @@ -6,7 +6,7 @@ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -ROOT_URLCONF = "tests.urls" +ROOT_URLCONF = "tests.django.urls" SECRET_KEY = ( "e4ac21f475d9d1c4b426f82640e7772d7f09fd8caa78919abbfcc4949bd74aa1b48302a3d2162cdd" diff --git a/tests/django/test_django.py b/tests/django/test_django.py index 9bd435d..c3ac859 100644 --- a/tests/django/test_django.py +++ b/tests/django/test_django.py @@ -6,7 +6,6 @@ import pytest import redis -from django import VERSION as django_version from django.core.checks.registry import registry from django.core.exceptions import ImproperlyConfigured from django.db import connection @@ -20,14 +19,17 @@ from dockerflow.django.middleware import DockerflowMiddleware -@pytest.fixture +@pytest.fixture(autouse=True) def reset_checks(): - if django_version[0] < 2: - registry.registered_checks = [] - registry.deployment_checks = [] - else: - registry.registered_checks = set() - registry.deployment_checks = set() + yield + registry.registered_checks = set() + registry.deployment_checks = set() + + +@pytest.fixture(autouse=True) +def setup_request_summary_logger(dockerflow_middleware): + dockerflow_middleware.summary_logger.addHandler(logging.NullHandler()) + dockerflow_middleware.summary_logger.setLevel(logging.INFO) @pytest.fixture @@ -54,9 +56,8 @@ def test_version_missing(dockerflow_middleware, mocker, rf): @pytest.mark.django_db -def test_heartbeat(dockerflow_middleware, reset_checks, rf, settings): - request = rf.get("/__heartbeat__") - response = dockerflow_middleware.process_request(request) +def test_heartbeat(client, settings): + response = client.get("/__heartbeat__") assert response.status_code == 200 settings.DOCKERFLOW_CHECKS = [ @@ -64,8 +65,46 @@ def test_heartbeat(dockerflow_middleware, reset_checks, rf, settings): "tests.django.django_checks.error", ] checks.register() - response = dockerflow_middleware.process_request(request) + response = client.get("/__heartbeat__") assert response.status_code == 500 + content = response.json() + assert content["status"] == "error" + assert content.get("checks") is None + assert content.get("details") is None + + +@pytest.mark.django_db +def test_heartbeat_debug(client, settings): + settings.DOCKERFLOW_CHECKS = [ + "tests.django.django_checks.warning", + "tests.django.django_checks.error", + ] + settings.DEBUG = True + checks.register() + response = client.get("/__heartbeat__") + 
assert response.status_code == 500 + content = response.json() + assert content["status"] + assert content["checks"] + assert content["details"] + + +@pytest.mark.django_db +def test_heartbeat_silenced(client, settings): + settings.DOCKERFLOW_CHECKS = [ + "tests.django.django_checks.warning", + "tests.django.django_checks.error", + ] + settings.SILENCED_SYSTEM_CHECKS.append("tests.checks.E001") + settings.DEBUG = True + checks.register() + + response = client.get("/__heartbeat__") + assert response.status_code == 200 + content = response.json() + assert content["status"] == "warning" + assert "warning" in content["details"] + assert "error" not in content["details"] @pytest.mark.django_db @@ -77,7 +116,7 @@ def test_heartbeat_logging(dockerflow_middleware, reset_checks, rf, settings, ca ] checks.register() - with caplog.at_level(logging.INFO, logger="dockerflow.django"): + with caplog.at_level(logging.INFO, logger="dockerflow.checks.registry"): dockerflow_middleware.process_request(request) logged = [(record.levelname, record.message) for record in caplog.records] assert ("ERROR", "tests.checks.E001: some error") in logged @@ -95,11 +134,10 @@ def test_lbheartbeat_makes_no_db_queries(dockerflow_middleware, rf): @pytest.mark.django_db -def test_redis_check(dockerflow_middleware, reset_checks, rf, settings): +def test_redis_check(client, settings): settings.DOCKERFLOW_CHECKS = ["dockerflow.django.checks.check_redis_connected"] checks.register() - request = rf.get("/__heartbeat__") - response = dockerflow_middleware.process_request(request) + response = client.get("/__heartbeat__") assert response.status_code == 200 diff --git a/tests/flask/test_flask.py b/tests/flask/test_flask.py index e9ccf26..ca4ceb5 100644 --- a/tests/flask/test_flask.py +++ b/tests/flask/test_flask.py @@ -16,8 +16,13 @@ from flask_sqlalchemy import SQLAlchemy, get_debug_queries from sqlalchemy.exc import DBAPIError, SQLAlchemyError -from dockerflow import health -from dockerflow.flask import Dockerflow, checks +from dockerflow import checks, health +from dockerflow.flask import Dockerflow +from dockerflow.flask.checks import ( + check_database_connected, + check_migrations_applied, + check_redis_connected, +) class MockUser(UserMixin): @@ -48,6 +53,12 @@ def dockerflow(app): return Dockerflow(app) +@pytest.fixture() +def setup_request_summary_logger(dockerflow): + dockerflow.summary_logger.addHandler(logging.NullHandler()) + dockerflow.summary_logger.setLevel(logging.INFO) + + @pytest.fixture def db(app): app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://" @@ -112,20 +123,18 @@ def version_callback(path): def test_heartbeat(app, dockerflow): # app.debug = True - dockerflow.checks.clear() - response = app.test_client().get("/__heartbeat__") assert response.status_code == 200 - @dockerflow.check + @checks.register def error_check(): return [checks.Error("some error", id="tests.checks.E001")] - @dockerflow.check() + @checks.register() def warning_check(): return [checks.Warning("some warning", id="tests.checks.W001")] - @dockerflow.check(name="warning-check-two") + @checks.register(name="warning-check-two") def warning_check2(): return [checks.Warning("some other warning", id="tests.checks.W002")] @@ -139,18 +148,36 @@ def warning_check2(): assert "warning-check-two" in defaults -def test_heartbeat_logging(app, dockerflow, caplog): - dockerflow.checks.clear() +def test_heartbeat_silenced_checks(app): + Dockerflow(app, silenced_checks=["tests.checks.W001"]) - @dockerflow.check + @checks.register def error_check(): return 
[checks.Error("some error", id="tests.checks.E001")] - @dockerflow.check() + @checks.register def warning_check(): return [checks.Warning("some warning", id="tests.checks.W001")] - with caplog.at_level(logging.INFO, logger="dockerflow.django"): + response = app.test_client().get("/__heartbeat__") + assert response.status_code == 500 + payload = json.loads(response.data.decode()) + assert payload["status"] == "error" + details = payload["details"] + assert "error_check" in details + assert "warning_check" not in details + + +def test_heartbeat_logging(app, dockerflow, caplog): + @checks.register + def error_check(): + return [checks.Error("some error", id="tests.checks.E001")] + + @checks.register() + def warning_check(): + return [checks.Warning("some warning", id="tests.checks.W001")] + + with caplog.at_level(logging.INFO, logger="dockerflow.checks.registry"): app.test_client().get("/__heartbeat__") logged = [(record.levelname, record.message) for record in caplog.records] @@ -170,8 +197,8 @@ def test_full_redis_check(mocker): app = Flask("redis-check") app.debug = True redis_store = FlaskRedis.from_custom_provider(FakeStrictRedis, app) - dockerflow = Dockerflow(app, redis=redis_store) - assert "check_redis_connected" in dockerflow.checks + Dockerflow(app, redis=redis_store) + assert "check_redis_connected" in checks.get_checks() with app.test_client() as test_client: response = test_client.get("/__heartbeat__") @@ -184,8 +211,8 @@ def test_full_redis_check_error(mocker): redis_store = FlaskRedis.from_custom_provider(FakeStrictRedis, app) ping = mocker.patch.object(redis_store, "ping") ping.side_effect = redis.ConnectionError - dockerflow = Dockerflow(app, redis=redis_store) - assert "check_redis_connected" in dockerflow.checks + Dockerflow(app, redis=redis_store) + assert "check_redis_connected" in checks.get_checks() with app.test_client() as test_client: response = test_client.get("/__heartbeat__") @@ -194,8 +221,8 @@ def test_full_redis_check_error(mocker): def test_full_db_check(mocker, app, db, client): - dockerflow = Dockerflow(app, db=db) - assert "check_database_connected" in dockerflow.checks + Dockerflow(app, db=db) + assert "check_database_connected" in checks.get_checks() response = client.get("/__heartbeat__") assert response.status_code == 200 @@ -205,13 +232,43 @@ def test_full_db_check(mocker, app, db, client): def test_full_db_check_error(mocker, app, db, client): with app.app_context(): mocker.patch.object(db.engine, "connect", side_effect=SQLAlchemyError) - dockerflow = Dockerflow(app, db=db) - assert "check_database_connected" in dockerflow.checks + Dockerflow(app, db=db) + assert "check_database_connected" in checks.get_checks() response = client.get("/__heartbeat__") assert response.status_code == 500 assert json.loads(response.data.decode())["status"] == "error" +def test_full_migrate_check(mocker, client, app, db, migrate): + mocker.patch( + "alembic.script.ScriptDirectory.get_heads", return_value=("17164a7d1c2e",) + ) + mocker.patch( + "alembic.migration.MigrationContext.get_current_heads", + return_value=("17164a7d1c2e",), + ) + Dockerflow(app, migrate=migrate) + with app.app_context(): + assert "check_migrations_applied" in checks.get_checks() + response = client.get("/__heartbeat__") + assert response.status_code == 200 + assert json.loads(response.data.decode())["status"] == "ok" + + +def test_full_migrate_check_error(mocker, client, app, db, migrate): + with app.app_context(): + mocker.patch.object(db.engine, "connect", side_effect=SQLAlchemyError) + 
Dockerflow(app, migrate=migrate) + assert "check_migrations_applied" in checks.get_checks() + response = client.get("/__heartbeat__") + assert response.status_code == 200 + assert response.json["status"] == "info" + assert ( + health.INFO_CANT_CHECK_MIGRATIONS + in response.json["details"]["check_migrations_applied"]["messages"] + ) + + def assert_log_record(record, errno=0, level=logging.INFO): assert record.levelno == level assert record.errno == errno @@ -226,7 +283,7 @@ def assert_log_record(record, errno=0, level=logging.INFO): headers = {"User-Agent": "dockerflow/tests", "Accept-Language": "tlh"} -def test_request_summary(caplog, app, dockerflow, client): +def test_request_summary(caplog, app, client, setup_request_summary_logger): caplog.set_level(logging.INFO) with app.test_request_context("/"): client.get("/", headers=headers) @@ -344,7 +401,7 @@ def hostile_callback(): def test_db_check_sqlalchemy_error(app, mocker, db): with app.app_context(): mocker.patch.object(db.engine, "connect", side_effect=SQLAlchemyError) - errors = checks.check_database_connected(db) + errors = check_database_connected(db) assert len(errors) == 1 assert errors[0].id == health.ERROR_SQLALCHEMY_EXCEPTION @@ -353,14 +410,14 @@ def test_db_check_dbapi_error(app, mocker, db): with app.app_context(): exception = DBAPIError.instance("", [], Exception(), Exception) mocker.patch.object(db.engine, "connect", side_effect=exception) - errors = checks.check_database_connected(db) + errors = check_database_connected(db) assert len(errors) == 1 assert errors[0].id == health.ERROR_DB_API_EXCEPTION def test_db_check_success(app, db): with app.app_context(): - errors = checks.check_database_connected(db) + errors = check_database_connected(db) assert errors == [] @@ -393,7 +450,7 @@ def test_check_migrations_applied_cannot_check_migrations( ): with app.app_context(): mocker.patch.object(db.engine, "connect", side_effect=exception) - errors = checks.check_migrations_applied(migrate) + errors = check_migrations_applied(migrate) assert len(errors) == 1 assert errors[0].id == health.INFO_CANT_CHECK_MIGRATIONS @@ -407,7 +464,7 @@ def test_check_migrations_applied_success(mocker, app, db, migrate): return_value=("17164a7d1c2e",), ) with app.app_context(): - errors = checks.check_migrations_applied(migrate) + errors = check_migrations_applied(migrate) assert get_heads.called assert get_current_heads.called assert len(errors) == 0 @@ -422,7 +479,7 @@ def test_check_migrations_applied_unapplied_migrations(mocker, app, db, migrate) return_value=("73d96d3120ff",), ) with app.app_context(): - errors = checks.check_migrations_applied(migrate) + errors = check_migrations_applied(migrate) assert get_heads.called assert get_current_heads.called assert len(errors) == 1 @@ -439,7 +496,7 @@ def test_check_migrations_applied_unapplied_migrations(mocker, app, db, migrate) def test_check_redis_connected(mocker, redis_store, exception, error): ping = mocker.patch.object(redis_store, "ping") ping.side_effect = exception - errors = checks.check_redis_connected(redis_store) + errors = check_redis_connected(redis_store) assert len(errors) == 1 assert errors[0].id == error @@ -447,11 +504,11 @@ def test_check_redis_connected(mocker, redis_store, exception, error): def test_check_redis_connected_ping_check(mocker, redis_store): ping = mocker.patch.object(redis_store, "ping") ping.return_value = True - errors = checks.check_redis_connected(redis_store) + errors = check_redis_connected(redis_store) assert len(errors) == 0 ping.return_value = False - 
errors = checks.check_redis_connected(redis_store) + errors = check_redis_connected(redis_store) assert len(errors) == 1 assert errors[0].id == health.ERROR_REDIS_PING_FAILED @@ -461,3 +518,23 @@ def test_checks_imports(): from dockerflow.flask.checks.messages import level_to_text as b assert a == b + + +def test_heartbeat_checks_legacy(dockerflow, client): + dockerflow.checks.clear() + + @dockerflow.check + def error_check(): + return [checks.Error("some error", id="tests.checks.E001")] + + def error_check_partial(obj): + return [checks.Error(repr(obj), id="tests.checks.E001")] + + dockerflow.init_check(error_check_partial, ("foo", "bar")) + + response = client.get("/__heartbeat__") + assert response.status_code == 500 + payload = response.json + assert payload["status"] == "error" + assert "error_check" in payload["details"] + assert "('foo', 'bar')" in str(payload["details"]["error_check_partial"]) diff --git a/tests/sanic/test_sanic.py b/tests/sanic/test_sanic.py index 5d997be..c799091 100644 --- a/tests/sanic/test_sanic.py +++ b/tests/sanic/test_sanic.py @@ -12,8 +12,8 @@ from sanic_redis import SanicRedis from sanic_testing.testing import SanicTestClient -from dockerflow import health -from dockerflow.sanic import Dockerflow, checks +from dockerflow import checks, health +from dockerflow.sanic import Dockerflow class FakeRedis: @@ -69,6 +69,12 @@ def dockerflow(app): return Dockerflow(app) +@pytest.fixture() +def setup_request_summary_logger(dockerflow): + dockerflow.summary_logger.addHandler(logging.NullHandler()) + dockerflow.summary_logger.setLevel(logging.INFO) + + @pytest.fixture def dockerflow_redis(app): app.config["REDIS"] = {"address": "redis://:password@localhost:6379/0"} @@ -131,24 +137,20 @@ def test_lbheartbeat(dockerflow, test_client): def test_heartbeat(dockerflow, test_client): - dockerflow.checks.clear() - _, response = test_client.get("/__heartbeat__") assert response.status == 200 def test_heartbeat_checks(dockerflow, test_client): - dockerflow.checks.clear() - - @dockerflow.check + @checks.register def error_check(): return [checks.Error("some error", id="tests.checks.E001")] - @dockerflow.check() + @checks.register() def warning_check(): return [checks.Warning("some warning", id="tests.checks.W001")] - @dockerflow.check(name="warning-check-two") + @checks.register(name="warning-check-two") async def warning_check2(): return [checks.Warning("some other warning", id="tests.checks.W002")] @@ -162,18 +164,36 @@ async def warning_check2(): assert "warning-check-two" in details -def test_heartbeat_logging(dockerflow, test_client, caplog): - dockerflow.checks.clear() +def test_heartbeat_silenced_checks(app, test_client): + app = Dockerflow(app, silenced_checks=["tests.checks.E001"]) - @dockerflow.check + @checks.register + def error_check(): + return [checks.Error("some error", id="tests.checks.E001")] + + @checks.register() + def warning_check(): + return [checks.Warning("some warning", id="tests.checks.W001")] + + _, response = test_client.get("/__heartbeat__") + assert response.status == 200 + payload = response.json + assert payload["status"] == "warning" + details = payload["details"] + assert "error_check" not in details + assert "warning_check" in details + + +def test_heartbeat_logging(dockerflow, test_client, caplog): + @checks.register def error_check(): return [checks.Error("some error", id="tests.checks.E001")] - @dockerflow.check() + @checks.register() def warning_check(): return [checks.Warning("some warning", id="tests.checks.W001")] - with 
caplog.at_level(logging.INFO, logger="dockerflow.sanic"): + with caplog.at_level(logging.INFO, logger="dockerflow.checks.registry"): _, response = test_client.get("/__heartbeat__") logged = [(record.levelname, record.message) for record in caplog.records] @@ -182,7 +202,7 @@ def warning_check(): def test_redis_check(dockerflow_redis, mocker, test_client): - assert "check_redis_connected" in dockerflow_redis.checks + assert "check_redis_connected" in checks.get_checks() mocker.patch.object(sanic_redis.core, "from_url", fake_redis) _, response = test_client.get("/__heartbeat__") assert response.status == 200 @@ -201,7 +221,7 @@ def test_redis_check(dockerflow_redis, mocker, test_client): ], ) def test_redis_check_error(dockerflow_redis, mocker, test_client, error, messages): - assert "check_redis_connected" in dockerflow_redis.checks + assert "check_redis_connected" in checks.get_checks() fake_redis_error = functools.partial(fake_redis, error=error) mocker.patch.object(sanic_redis.core, "from_url", fake_redis_error) _, response = test_client.get("/__heartbeat__") @@ -231,7 +251,7 @@ def assert_log_record(caplog, errno=0, level=logging.INFO, rid=None, t=int, path headers = {"User-Agent": "dockerflow/tests", "Accept-Language": "tlh"} -def test_request_summary(caplog, dockerflow, test_client): +def test_request_summary(caplog, setup_request_summary_logger, test_client): request, _ = test_client.get(headers=headers) assert isinstance(request.ctx.start_timestamp, float) assert request.ctx.id is not None @@ -250,7 +270,9 @@ def exception_raiser(request): assert record.getMessage() == "exception message" -def test_request_summary_failed_request(app, caplog, dockerflow, test_client): +def test_request_summary_failed_request( + app, caplog, setup_request_summary_logger, test_client +): @app.middleware def hostile_callback(request): # simulating resetting request changes @@ -259,3 +281,23 @@ def hostile_callback(request): test_client.get(headers=headers) assert_log_record(caplog, rid=None, t=None) + + +def test_heartbeat_checks_legacy(dockerflow, test_client): + dockerflow.checks.clear() + + @dockerflow.check + def error_check(): + return [checks.Error("some error", id="tests.checks.E001")] + + def error_check_partial(obj): + return [checks.Error(repr(obj), id="tests.checks.E001")] + + dockerflow.init_check(error_check_partial, ("foo", "bar")) + + _, response = test_client.get("/__heartbeat__") + assert response.status == 500 + payload = response.json + assert payload["status"] == "error" + assert "error_check" in payload["details"] + assert "('foo', 'bar')" in str(payload["details"]["error_check_partial"]) diff --git a/tox.ini b/tox.ini index a06a26f..9cef089 100644 --- a/tox.ini +++ b/tox.ini @@ -39,9 +39,9 @@ deps = s22: -ctests/constraints/sanic-22.txt commands = python --version - dj{32,40,41,42}: pytest tests/core/ tests/django --no-migrations -o DJANGO_SETTINGS_MODULE=tests.django.settings -o django_find_project=false {posargs:} - fl{20,21,22}: pytest tests/core/ tests/flask/ {posargs:} - s{21,22}: pytest tests/core/ tests/sanic/ {posargs:} + dj{32,40,41,42}: pytest --no-migrations -o DJANGO_SETTINGS_MODULE=tests.django.settings -o django_find_project=false {posargs:tests/core/ tests/django} + fl{20,21,22}: pytest {posargs:tests/core/ tests/flask/} + s{21,22}: pytest {posargs:tests/core/ tests/sanic/} [testenv:py311-docs] basepython = python3.11
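
Reviewer note: a minimal usage sketch of the shared check registry added in src/dockerflow/checks/registry.py, using only the API exported in this patch (register, register_partial, run_checks, get_checks, and the message classes). The check body and the "acme.health.0001" id are illustrative, not part of the change.

    from dockerflow import checks

    @checks.register
    def storage_reachable():
        # Return a list of CheckMessage instances; an empty list means "ok".
        return [checks.Error("storage unreachable", id="acme.health.0001")]

    # Checks that need a client object can be registered with partially
    # applied arguments, e.g.:
    # checks.register_partial(check_redis_connected, redis_client)

    results = checks.run_checks(
        checks.get_checks().items(),
        silenced_check_ids=["acme.health.0001"],
    )
    print(results.level, results.statuses, results.details)

For async frameworks, run_checks_async() awaits coroutine checks and runs synchronous ones in an executor, as the Sanic _heartbeat_view changes above show.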