diff --git a/.gitignore b/.gitignore index 55b7717b..1653d5be 100644 --- a/.gitignore +++ b/.gitignore @@ -113,3 +113,4 @@ ENV/ .vscode #Directories /pydash/logs +/pydash/.pytest_cache \ No newline at end of file diff --git a/Pipfile b/Pipfile new file mode 100644 index 00000000..f96308ab --- /dev/null +++ b/Pipfile @@ -0,0 +1,11 @@ +[[source]] +url = "https://pypi.python.org/simple" +verify_ssl = true +name = "pypi" + +[packages] + +[dev-packages] + +[requires] +python_version = "3.6" diff --git a/build_and_run.sh b/build_and_run.sh index 6df3b04e..6025ba87 100755 --- a/build_and_run.sh +++ b/build_and_run.sh @@ -41,7 +41,8 @@ RunFlask() { PydashPrint "Finally: Starting flask webservice. Close with Ctrl+C" cd pydash - pipenv run "flask run --no-reload" + pipenv run flask run --no-reload + cd .. } BuildFrontend diff --git a/pydash/Pipfile b/pydash/Pipfile index 83869d25..2e99e7e2 100644 --- a/pydash/Pipfile +++ b/pydash/Pipfile @@ -1,12 +1,9 @@ [[source]] - url = "https://pypi.python.org/simple" verify_ssl = true name = "pypi" - [packages] - Flask = "*" Flask-WTF = "*" flask-login = "*" @@ -18,16 +15,14 @@ requests = "*" Flask-Cors = "*" pqdict = "*" - [dev-packages] - - +pytest = "*" +pytest-cov = "*" +pytest-xdist = "*" +pytest-env = "*" [requires] - python_version = "3.6" - [pipenv] - keep_outdated = true diff --git a/pydash/Pipfile.lock b/pydash/Pipfile.lock index c185dec0..88c3083a 100644 --- a/pydash/Pipfile.lock +++ b/pydash/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "84c62d6459614d7ce35c60fa0c4e9d94a047405b18331653b0655643b80d8f42" + "sha256": "825d49fe143c66d28b98e57a0bd2f859f29ad5f7bb53d8bb3cfb8ccfbb7fd5f2" }, "pipfile-spec": 6, "requires": { @@ -313,5 +313,136 @@ "version": "==4.5.0" } }, - "develop": {} + "develop": { + "apipkg": { + "hashes": [ + "sha256:2e38399dbe842891fe85392601aab8f40a8f4cc5a9053c326de35a1cc0297ac6", + "sha256:65d2aa68b28e7d31233bb2ba8eb31cda40e4671f8ac2d6b241e358c9652a74b9" + ], + "version": "==1.4" + }, + "attrs": { + "hashes": [ + "sha256:1c7960ccfd6a005cd9f7ba884e6316b5e430a3f1a6c37c5f87d8b43f83b54ec9", + "sha256:a17a9573a6f475c99b551c0e0a812707ddda1ec9653bed04c13841404ed6f450" + ], + "version": "==17.4.0" + }, + "coverage": { + "hashes": [ + "sha256:03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba", + "sha256:0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed", + "sha256:104ab3934abaf5be871a583541e8829d6c19ce7bde2923b2751e0d3ca44db60a", + "sha256:15b111b6a0f46ee1a485414a52a7ad1d703bdf984e9ed3c288a4414d3871dcbd", + "sha256:198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640", + "sha256:1c383d2ef13ade2acc636556fd544dba6e14fa30755f26812f54300e401f98f2", + "sha256:28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162", + "sha256:2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508", + "sha256:337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249", + "sha256:3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694", + "sha256:3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a", + "sha256:3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287", + "sha256:3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1", + "sha256:4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000", + "sha256:56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1", + "sha256:5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e", + "sha256:69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5", + 
"sha256:6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062", + "sha256:701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba", + "sha256:7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc", + "sha256:76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc", + "sha256:7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99", + "sha256:7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653", + "sha256:7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c", + "sha256:8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558", + "sha256:9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f", + "sha256:9e112fcbe0148a6fa4f0a02e8d58e94470fc6cb82a5481618fea901699bf34c4", + "sha256:ac4fef68da01116a5c117eba4dd46f2e06847a497de5ed1d64bb99a5fda1ef91", + "sha256:b8815995e050764c8610dbc82641807d196927c3dbed207f0a079833ffcf588d", + "sha256:be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9", + "sha256:c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd", + "sha256:de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d", + "sha256:e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6", + "sha256:e4d96c07229f58cb686120f168276e434660e4358cc9cf3b0464210b04913e77", + "sha256:f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80", + "sha256:f8a923a85cb099422ad5a2e345fe877bbc89a8a8b23235824a93488150e45f6e" + ], + "version": "==4.5.1" + }, + "execnet": { + "hashes": [ + "sha256:a7a84d5fa07a089186a329528f127c9d73b9de57f1a1131b82bb5320ee651f6a", + "sha256:fc155a6b553c66c838d1a22dba1dc9f5f505c43285a878c6f74a79c024750b83" + ], + "version": "==1.5.0" + }, + "more-itertools": { + "hashes": [ + "sha256:0dd8f72eeab0d2c3bd489025bb2f6a1b8342f9b198f6fc37b52d15cfa4531fea", + "sha256:11a625025954c20145b37ff6309cd54e39ca94f72f6bb9576d1195db6fa2442e", + "sha256:c9ce7eccdcb901a2c75d326ea134e0886abfbea5f93e91cc95de9507c0816c44" + ], + "version": "==4.1.0" + }, + "pluggy": { + "hashes": [ + "sha256:7f8ae7f5bdf75671a718d2daf0a64b7885f74510bcd98b1a0bb420eb9a9d0cff", + "sha256:d345c8fe681115900d6da8d048ba67c25df42973bda370783cd58826442dcd7c", + "sha256:e160a7fcf25762bb60efc7e171d4497ff1d8d2d75a3d0df7a21b76821ecbf5c5" + ], + "version": "==0.6.0" + }, + "py": { + "hashes": [ + "sha256:29c9fab495d7528e80ba1e343b958684f4ace687327e6f789a94bf3d1915f881", + "sha256:983f77f3331356039fdd792e9220b7b8ee1aa6bd2b25f567a963ff1de5a64f6a" + ], + "version": "==1.5.3" + }, + "pytest": { + "hashes": [ + "sha256:54713b26c97538db6ff0703a12b19aeaeb60b5e599de542e7fca0ec83b9038e8", + "sha256:829230122facf05a5f81a6d4dfe6454a04978ea3746853b2b84567ecf8e5c526" + ], + "index": "pypi", + "version": "==3.5.1" + }, + "pytest-cov": { + "hashes": [ + "sha256:03aa752cf11db41d281ea1d807d954c4eda35cfa1b21d6971966cc041bbf6e2d", + "sha256:890fe5565400902b0c78b5357004aab1c814115894f4f21370e2433256a3eeec" + ], + "index": "pypi", + "version": "==2.5.1" + }, + "pytest-env": { + "hashes": [ + "sha256:7e94956aef7f2764f3c147d216ce066bf6c42948bb9e293169b1b1c880a580c2" + ], + "index": "pypi", + "version": "==0.6.2" + }, + "pytest-forked": { + "hashes": [ + "sha256:e4500cd0509ec4a26535f7d4112a8cc0f17d3a41c29ffd4eab479d2a55b30805", + "sha256:f275cb48a73fc61a6710726348e1da6d68a978f0ec0c54ece5a5fae5977e5a08" + ], + "version": "==0.2" + }, + "pytest-xdist": { + "hashes": [ + "sha256:be2662264b035920ba740ed6efb1c816a83c8a22253df7766d129f6a7bfdbd35", + "sha256:e8f5744acc270b3e7d915bdb4d5f471670f049b6fbd163d4cbd52203b075d30f" + ], + 
"index": "pypi", + "version": "==1.22.2" + }, + "six": { + "hashes": [ + "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", + "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" + ], + "version": "==1.11.0" + } + } } diff --git a/pydash/conftest.py b/pydash/conftest.py new file mode 100644 index 00000000..71b09242 --- /dev/null +++ b/pydash/conftest.py @@ -0,0 +1,9 @@ +import pytest +import pydash_database +import pydash_app.user.repository +import pydash_app.dashboard.repository +@pytest.fixture(autouse=True) +def clean_in_memory_database(*_): + pydash_app.user.repository.clear_all() + pydash_app.dashboard.repository.clear_all() + diff --git a/pydash/pydash_app/impl/fetch.py b/pydash/flask_monitoring_dashboard_client/__init__.py similarity index 89% rename from pydash/pydash_app/impl/fetch.py rename to pydash/flask_monitoring_dashboard_client/__init__.py index 5bb98c3c..bf3c872b 100644 --- a/pydash/pydash_app/impl/fetch.py +++ b/pydash/flask_monitoring_dashboard_client/__init__.py @@ -1,14 +1,22 @@ +""" +Performs the remote requests to the flask-monitoring-dashboard. + +The method names in this module 1:1 reflect the names of the flask-monitoring-dashboard API +(but without the word 'JSON' in them, because conversion from JSON to Python dictionaries/lists +is one of the thing this module handles for you.) +""" + import requests import jwt import json -import pydash_app.impl.logger as pylog +import pydash_logger DETAILS_ENDPOINT = 0 RULES_ENDPOINT = 1 DATA_ENDPOINT = 2 -logger = pylog.Logger(__name__) +logger = pydash_logger.Logger(__name__) def get_details(dashboard_url): """ diff --git a/pydash/pydash_app/impl/periodic_tasks/__init__.py b/pydash/periodic_tasks/__init__.py similarity index 94% rename from pydash/pydash_app/impl/periodic_tasks/__init__.py rename to pydash/periodic_tasks/__init__.py index 004ee090..1d7c8e8c 100644 --- a/pydash/pydash_app/impl/periodic_tasks/__init__.py +++ b/pydash/periodic_tasks/__init__.py @@ -19,10 +19,11 @@ The scheduler will be started by calling the `start()` function. It will stop scheduling and tear down the spawned processes when calling the `stop()` function. This function will also (in most cases) be automatically called when the main process finishes execution. 
+ Example code with default scheduler: - >>> import pydash_app.impl.periodic_tasks as pt + >>> import periodic_tasks as pt >>> import datetime >>> pt.start_default_scheduler() >>> pt.add_periodic_task('foo', datetime.timedelta(seconds=3), pt.foo) @@ -30,15 +31,19 @@ >>> pt.add_background_task('baz', pt.baz) >>> pt.add_periodic_task('bar', datetime.timedelta(seconds=1), pt.bar) # overrides previous `bar` task with new settings >>> pt.remove_task('foo') + >>> pt.default_task_scheduler.stop() + Example code with custom scheduler: - >>> import pydash_app.impl.periodic_tasks as pt + >>> import periodic_tasks as pt >>> ts = pt.TaskScheduler() - >>> import datetime + >>> import datetime, time >>> ts.start() - >>> ts.add_periodic_task('foo', datetime.timedelta(seconds=1), pt.foo) - >>> ts.add_periodic_task('bar', datetime.timedelta(seconds=5), pt.bar) + >>> ts.add_periodic_task('foo', datetime.timedelta(milliseconds=1), pt.foo) + >>> ts.add_periodic_task('bar', datetime.timedelta(milliseconds=5), pt.bar) + >>> time.sleep(2) + >>> ts.stop() """ from .task_scheduler import TaskScheduler diff --git a/pydash/pydash_app/impl/periodic_tasks/pqdict_iter_upto_priority.py b/pydash/periodic_tasks/pqdict_iter_upto_priority.py similarity index 100% rename from pydash/pydash_app/impl/periodic_tasks/pqdict_iter_upto_priority.py rename to pydash/periodic_tasks/pqdict_iter_upto_priority.py diff --git a/pydash/pydash_app/impl/periodic_tasks/queue_nonblocking_iter.py b/pydash/periodic_tasks/queue_nonblocking_iter.py similarity index 100% rename from pydash/pydash_app/impl/periodic_tasks/queue_nonblocking_iter.py rename to pydash/periodic_tasks/queue_nonblocking_iter.py diff --git a/pydash/pydash_app/impl/periodic_tasks/task_scheduler.py b/pydash/periodic_tasks/task_scheduler.py similarity index 78% rename from pydash/pydash_app/impl/periodic_tasks/task_scheduler.py rename to pydash/periodic_tasks/task_scheduler.py index 895d4dd5..d619c71c 100644 --- a/pydash/pydash_app/impl/periodic_tasks/task_scheduler.py +++ b/pydash/periodic_tasks/task_scheduler.py @@ -4,6 +4,8 @@ """ +import os +import signal import multiprocessing import atexit import datetime @@ -12,7 +14,30 @@ from .pqdict_iter_upto_priority import pqdict_iter_upto_priority from .queue_nonblocking_iter import queue_nonblocking_iter +from pytest_cov.embed import cleanup + +# import os +# if "TESTING" in os.environ: +# multiprocessing.Process = threading.Thread + class _Task: + """ + A task that can be run using the TaskScheduler. + + Usually you'd want to use one of the more concrete instances of this class. + + >>> def awesome_fun(): + ... print("Awesome!") + >>> task = _Task("mytask", awesome_fun) + + >>> + >>> task = _Task("error_task", 10) + Traceback (most recent call last): + ... + ValueError + + """ + def __init__(self, name, target): """ :name: An identifier to find this task again later (and e.g. remove or alter it). Can be any hashable (using a string or a tuple of strings/integers is common.) @@ -28,15 +53,52 @@ def __init__(self, name, target): self.next_run_dt = datetime.datetime.now() def __call__(self, *args, **kwargs): - self.target(*args, **kwargs) + """ + It is possible to manually call this task using arguments, but usually the functions do not contain extra arguments + + >>> def awesome_fun(text): + ... print(text) + >>> task = _Task("mytask", awesome_fun) + >>> task("Awesome!") + Awesome! 
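As the doctest above shows, a `_Task` is just a named wrapper that forwards calls to its target. A self-contained sketch of that wrapper pattern, independent of this module (names hypothetical):

```
class Task:
    """Pairs a hashable name with a callable target, like _Task above."""
    def __init__(self, name, target):
        if not callable(target):
            raise ValueError(f"target {target!r} is not callable")
        self.name = name
        self.target = target

    def __call__(self, *args, **kwargs):
        # forward all arguments to the wrapped callable
        return self.target(*args, **kwargs)

task = Task("greet", lambda text: print(text))
task("Awesome!")  # prints: Awesome!
```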
+ + """ + if "TESTING" in os.environ: + signal.signal(signal.SIGTERM, cleanup) + return self.target(*args, **kwargs) def __hash__(self): + """ + All tasks are hashed, such that two tasks with the same name are considered equal. + This is to ensure that new instances of tasks with the same name replace old instances of these tasks inside the scheduler. + + + >>> def awesome_fun1(): + ... print("foo") + >>> def awesome_fun2(): + ... print("bar") + >>> task = _Task("mytask", awesome_fun1) + >>> task2 = _Task("mytask", awesome_fun1) + >>> task == task2 + True + >>> hash(task) == hash(task2) + True + """ return hash(self.name) def __eq__(self, other): return isinstance(other, _Task) and other.name == self.name def __repr__(self): + """ + Tasks have a special string representation for easy introspection. + + >>> def awesome_fun(): + ... print("Awesome!") + >>> task = _Task("mytask", awesome_fun) + >>> f"{task}".startswith("<_Task name=mytask") + True + """ return f"<{self.__class__.__name__} name={self.name}, target={self.target}, next_run_dt={self.next_run_dt}>" @@ -74,6 +136,17 @@ def __init__(self, name, task, interval, run_at_start=False): :target: A function (or other callable) that will perform this task's functionality. :interval: A datetime.timedelta representing how frequently to run the given target. :run_at_start: If true, runs task right after it was added to the scheduler, rather than only after the first interval has passed. + + It is expected that `interval` is a datetime.timedelta. + + >>> import periodic_tasks as pt + >>> def awesome_fun(): + ... print("Awesome!") + >>> _PeriodicTask("foo", awesome_fun, 42) + Traceback (most recent call last): + ... + ValueError + """ super().__init__(name, task) @@ -122,7 +195,7 @@ class TaskScheduler: This function will also (in most cases) be automatically called when the main process finishes execution. """ - def __init__(self, granularity=1.0, pool_settings={}): + def __init__(self, granularity=0.1, pool_settings={}): """ :granularity: How often the scheduler should check if a periodic task's timeout has passed, in seconds. Defaults to `1.0`. :pool_settings: A dictionary of keyword-arguments to pass to the initialization of the multiprocessing.Pool that will be used to run the tasks on. @@ -131,6 +204,7 @@ def __init__(self, granularity=1.0, pool_settings={}): self._pool = None self._granularity = granularity self._tasks_to_be_scheduled = multiprocessing.Queue() + self._graceful_shutdown = multiprocessing.Value('i', 0) self.pool_settings = pool_settings def add_periodic_task(self, name, interval, task, run_at_start=False): @@ -174,6 +248,17 @@ def start(self): Starts the scheduler scheduling loop on a separate process. Should only be called once per scheduler. + + + >>> import periodic_tasks as pt + >>> ts = pt.TaskScheduler() + >>> ts.start() + >>> ts.start() + Traceback (most recent call last): + ... + Exception + + """ if hasattr(self, '_scheduler_process'): raise Exception("TaskScheduler.start() called multiple times.") @@ -191,9 +276,21 @@ def stop(self): Should only be called once per scheduler, and only after `start()` was called. When the program exits suddenly, this function will (in most cases) automatically be called to clean up the scheduling process. + + + >>> import periodic_tasks as pt + >>> ts = pt.TaskScheduler() + >>> ts.stop() + Traceback (most recent call last): + ... 
+            Exception
+        """
         if not hasattr(self, '_scheduler_process'):
             raise Exception("`TaskScheduler.stop()` called before calling `TaskScheduler.start()`")
+
+        self._graceful_shutdown.value = 1  # shared flag: assign through .value, don't rebind
+        self._scheduler_process.join(self._granularity)
         self._scheduler_process.terminate()

     def _scheduling_loop(self):
@@ -201,8 +298,10 @@
         Executed in the separate process.
         It makes sure all tasks are run whenever their time has come.
         """
+        if "TESTING" in os.environ:
+            signal.signal(signal.SIGTERM, cleanup)
         with multiprocessing.Pool(**self.pool_settings) as pool:
-            while True:
+            while self._graceful_shutdown.value != 1:
                 self._add_tasks_to_be_scheduled()
                 current_time = datetime.datetime.now()
                 self._run_waiting_tasks(pool, current_time)
@@ -242,4 +341,4 @@
         Executes a single task in one of the pool's processes
         """
         res = pool.apply_async(task, ())
-        res.get() # <- Uncomment this line to debug tasks in an easy way. Serializes task execution however!
+        # res.get() # <- Uncomment this line to debug tasks in an easy way. Serializes task execution however!
diff --git a/pydash/pydash_app/__init__.py b/pydash/pydash_app/__init__.py
index 3b986c04..bf954d9d 100644
--- a/pydash/pydash_app/__init__.py
+++ b/pydash/pydash_app/__init__.py
@@ -1,30 +1,35 @@
 """
 The `pydash_app` package contains all business domain logic of the PyDash application:
 Everything that is not part of rendering a set of webpages.
 """
-import pydash_app.user
-import pydash_app.dashboard
-from pydash_app.impl.periodic_tasks import default_task_scheduler
-from pydash_app.fetching.dashboard_fetch import schedule_periodic_dashboard_fetching
+import periodic_tasks
+
+import pydash_app.user.services.seeding
+import pydash_app.user as user
+
+import pydash_app.dashboard.services.fetching
+import pydash_app.dashboard.services.seeding
+import pydash_app.dashboard as dashboard


 def start_task_scheduler():
-    default_task_scheduler.start()
+    periodic_tasks.default_task_scheduler.start()


 def stop_task_scheduler():
-    default_task_scheduler.stop()
+    periodic_tasks.default_task_scheduler.stop()


 def schedule_periodic_tasks():
     import datetime  # <- remove this line when custom interval no longer necessary for testing.
-    schedule_periodic_dashboard_fetching(
-        interval=datetime.timedelta(seconds=5))
+    dashboard.services.fetching.schedule_all_periodic_dashboards_tasks(
+        interval=datetime.timedelta(minutes=1)
+    )


 def seed_datastructures():
     # Ensure no periodic tasks with old datastructures are run:
     stop_task_scheduler()

-    user.user_repository.seed_users()
-    dashboard.dashboard_repository.seed_dashboards()
+    user.services.seeding.seed()
+    dashboard.services.seeding.seed()
diff --git a/pydash/pydash_app/dashboard/__init__.py b/pydash/pydash_app/dashboard/__init__.py
index 632de320..b0d43661 100644
--- a/pydash/pydash_app/dashboard/__init__.py
+++ b/pydash/pydash_app/dashboard/__init__.py
@@ -3,7 +3,7 @@
 for interacting with Dashboards.
 """
 from .dashboard import Dashboard
-import pydash_app.dashboard.dashboard_repository
+import pydash_app.dashboard.repository


 def find(dashboard_id):
@@ -12,7 +12,7 @@
     :param dashboard_id: UUID of the dashboard we hope to find.
     :return: The Dashboard-entity with the given UUID or None if it could not be found.
     """
-    return dashboard_repository.find(dashboard_id)
+    return repository.find(dashboard_id)


 def dashboards_of_user(user_id):
@@ -21,7 +21,4 @@
     :param user_id: The UUID of the user whose dashboards we're requesting.
     :return: A list of Dashboard-entities.
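The graceful-shutdown flag used in `stop()` and `_scheduling_loop()` above is a `multiprocessing.Value` shared between the parent and the scheduling process; plain attribute assignment in the parent would only rebind the name locally, so reads and writes must go through `.value`. A runnable sketch of the same pattern, outside this codebase:

```
import multiprocessing
import time

def worker(flag):
    # poll the shared flag and exit once the parent sets it to 1
    while flag.value != 1:
        time.sleep(0.1)

if __name__ == "__main__":
    flag = multiprocessing.Value('i', 0)  # 'i' = C int, initial value 0
    process = multiprocessing.Process(target=worker, args=(flag,))
    process.start()
    flag.value = 1       # write through .value; rebinding `flag` would change nothing
    process.join(timeout=1)
    if process.is_alive():
        process.terminate()  # hard fallback, mirroring TaskScheduler.stop()
```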
:return: A list of Dashboard-entities. """ - return [db for db in dashboard_repository.all() if db.user_id == user_id] - -# TODO: As soon as PydashWeb requests data from the dashboard, -# use this module as intermediary between pydash_web and the dashboard_repository + return [db for db in repository.all() if db.user_id == user_id] diff --git a/pydash/pydash_app/dashboard/dashboard.py b/pydash/pydash_app/dashboard/dashboard.py index 6ac76052..2974ac15 100644 --- a/pydash/pydash_app/dashboard/dashboard.py +++ b/pydash/pydash_app/dashboard/dashboard.py @@ -1,39 +1,44 @@ -import uuid -import persistent +""" -from pydash_app.dashboard.endpoint import Endpoint -from .aggregator import Aggregator +Involved usage example: + +>>> from pydash_app.dashboard.dashboard import Dashboard +>>> from pydash_app.user.user import User +>>> from pydash_app.dashboard.endpoint import Endpoint +>>> from pydash_app.dashboard.endpoint_call import EndpointCall +>>> import uuid +>>> from datetime import datetime, timedelta +>>> user = User("Gandalf", "pass") +>>> d = Dashboard("http://foo.io", str(uuid.uuid4()), str(user.id)) +>>> e1 = Endpoint("foo", True) +>>> e2 = Endpoint("bar", True) +>>> d.add_endpoint(e1) +>>> d.add_endpoint(e2) +>>> ec1 = EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") +>>> ec2 = EndpointCall("foo", 0.1, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.2") +>>> ec3 = EndpointCall("bar", 0.2, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") +>>> ec4 = EndpointCall("bar", 0.2, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S") - timedelta(days=1), "0.1", "None", "127.0.0.1") +>>> ec5 = EndpointCall("bar", 0.2, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S") - timedelta(days=2), "0.1", "None", "127.0.0.1") +>>> d.add_endpoint_call(ec1) +>>> d.add_endpoint_call(ec2) +>>> d.add_endpoint_call(ec3) +>>> d.add_endpoint_call(ec4) +>>> d.add_endpoint_call(ec5) +>>> d.aggregated_data() +{'total_visits': 5, 'total_execution_time': 1.2, 'average_execution_time': 0.24, 'visits_per_day': {'2018-04-25': 3, '2018-04-24': 1, '2018-04-23': 1}, 'visits_per_ip': {'127.0.0.1': 4, '127.0.0.2': 1}, 'unique_visitors': 2, 'unique_visitors_per_day': {'2018-04-25': 2, '2018-04-24': 1, '2018-04-23': 1}} +>>> d.endpoints['foo'].aggregated_data() +{'total_visits': 2, 'total_execution_time': 0.6, 'average_execution_time': 0.3, 'visits_per_day': {'2018-04-25': 2}, 'visits_per_ip': {'127.0.0.1': 1, '127.0.0.2': 1}, 'unique_visitors': 2, 'unique_visitors_per_day': {'2018-04-25': 2}} +>>> d.endpoints['bar'].aggregated_data() +{'total_visits': 3, 'total_execution_time': 0.6000000000000001, 'average_execution_time': 0.20000000000000004, 'visits_per_day': {'2018-04-25': 1, '2018-04-24': 1, '2018-04-23': 1}, 'visits_per_ip': {'127.0.0.1': 3}, 'unique_visitors': 1, 'unique_visitors_per_day': {'2018-04-25': 1, '2018-04-24': 1, '2018-04-23': 1}} """ -Example testing code: +import uuid +import persistent -``` -from pydash_app.dashboard.dashboard import Dashboard from pydash_app.dashboard.endpoint import Endpoint -from pydash_app.dashboard.endpoint_call import EndpointCall -import uuid -from datetime import datetime, timedelta -d = Dashboard("http://foo.io", str(uuid.uuid4())) -e1 = Endpoint("foo", True) -e2 = Endpoint("bar", True) -d.add_endpoint(e1) -d.add_endpoint(e2) -ec1 = EndpointCall("foo", 0.5, datetime.now(), 0.1, "None", "127.0.0.1") -ec2 = 
EndpointCall("foo", 0.1, datetime.now(), 0.1, "None", "127.0.0.2") -ec3 = EndpointCall("bar", 0.2, datetime.now(), 0.1, "None", "127.0.0.1") -ec4 = EndpointCall("bar", 0.2, datetime.now() - timedelta(days=1), 0.1, "None", "127.0.0.1") -ec5 = EndpointCall("bar", 0.2, datetime.now() - timedelta(days=2), 0.1, "None", "127.0.0.1") -d.add_endpoint_call(ec1) -d.add_endpoint_call(ec2) -d.add_endpoint_call(ec3) -d.add_endpoint_call(ec4) -d.add_endpoint_call(ec5) -d.aggregated_data() -d.endpoints['foo'].aggregated_data() -d.endpoints['bar'].aggregated_data() -``` -""" +from .aggregator import Aggregator + class Dashboard(persistent.Persistent): diff --git a/pydash/pydash_app/dashboard/dashboard_repository.py b/pydash/pydash_app/dashboard/dashboard_repository.py deleted file mode 100644 index ddadfa65..00000000 --- a/pydash/pydash_app/dashboard/dashboard_repository.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -This module handles the persistence of `Dashboard` entities: - -It is an adapter of the actual persistence layer, to insulate the application -from datastore-specific details. - -It handles a subset of the following tasks -(specifically, it only actually contains functions for the tasks the application needs in its current state!): -- Creating new entities of the specified type. -- Finding them based on certain attributes. -- Persisting updated versions of existing entities. -- Deleting entities from the persistence layer. -""" -import uuid -import BTrees.OOBTree -import transaction -from ..impl.database import database_root, MultiIndexedPersistentCollection -import pydash_app.impl.database - - -if not hasattr(database_root(), 'dashboards'): - database_root().dashboards = MultiIndexedPersistentCollection({'id'}) - - -def find(dashboard_id): - # Ensure that this is also callable with strings or integers: - dashboard_id = uuid.UUID(dashboard_id) - print(f"Starting to look for dashboard {dashboard_id}") - - try: - res = database_root().dashboards['id', dashboard_id] - print(f"FOUND DASHBOARD in find_dashboard: {res}") - return res - except Exception as e: - print(f"EXCEPTION: {e}") - raise - - -def all(): - return database_root().dashboards.values() - - -def add(dashboard): - try: - database_root().dashboards.add(dashboard) - transaction.commit() - except KeyError: - transaction.abort() - raise - - -def update(dashboard): - try: - database_root().dashboards.update_item(dashboard) - transaction.commit() - except KeyError: - transaction.abort() - raise - - -def seed_dashboards(): - """ - For each user, stores some preliminary debug dashboards in the datastore, - to be used during development. - Note: for now the two dashboards that are being returned are - identical, apart from the url. - """ - - from pydash_app.dashboard.dashboard import Dashboard - from pydash_app.user import user_repository - from pydash_app.dashboard.endpoint import Endpoint - from pydash_app.dashboard.endpoint_call import EndpointCall - from datetime import datetime, timedelta - - # Clear current Dashboards-DB. - database_root().dashboards = MultiIndexedPersistentCollection({'id'}) - - # # Fill in dashboards. 
- # _dev_dashboard_urls = ['http://pydash.io/', 'http://pystach.io/'] - # _dev_endpoint_calls = [EndpointCall("foo", 0.5, datetime.now(), "0.1", "None", "127.0.0.1"), - # EndpointCall("foo", 0.1, datetime.now(), "0.1", "None", "127.0.0.2"), - # EndpointCall("bar", 0.2, datetime.now(), "0.1", "None", "127.0.0.1"), - # EndpointCall("bar", 0.2, datetime.now() - timedelta(days=1), "0.1", "None", "127.0.0.1"), - # EndpointCall("bar", 0.2, datetime.now() - timedelta(days=2), "0.1", "None", "127.0.0.1") - # ] - # - # # Instead of storing the endpoints in a list, we generate them on the fly, - # # to avoid users sharing the same endpoints for now, as we'd like to have a controlled environment for every user - # # during this stage of development. - # for user in user_repository.all(): - # for url in _dev_dashboard_urls: - # dashboard = Dashboard(url, user.get_id()) - # for endpoint in [Endpoint("foo", True), Endpoint("bar", True)]: - # dashboard.add_endpoint(endpoint) - # for endpoint_call in _dev_endpoint_calls: - # dashboard.add_endpoint_call(endpoint_call) - # print(f'Adding dashboard {dashboard}') - # add(dashboard) - - # TEST - from pydash_app.fetching.dashboard_fetch import initialize_endpoints, initialize_endpoint_calls - for user in user_repository.all(): - dashboard = Dashboard("http://136.243.248.188:9001/dashboard", - "cc83733cb0af8b884ff6577086b87909", - user.get_id()) - print(f'Adding dashboard {dashboard}') - add(dashboard) - print(f'Initialising dashboard {dashboard}') - initialize_endpoints(dashboard) - initialize_endpoint_calls(dashboard) - print(f'Initialized dashboard') - print(f'- {len(dashboard.endpoints)} endpoints found') - print(f'- {len(dashboard._endpoint_calls)} historical endpoint calls') - - print('Seeding of dashboards is done!') diff --git a/pydash/pydash_app/dashboard/endpoint_call.py b/pydash/pydash_app/dashboard/endpoint_call.py index 4c7564bf..a541c062 100644 --- a/pydash/pydash_app/dashboard/endpoint_call.py +++ b/pydash/pydash_app/dashboard/endpoint_call.py @@ -9,6 +9,11 @@ class EndpointCall(persistent.Persistent): As with the other entity classes, it does not concern itself with the implementation of its persistence, as it doesn't exist on its own. If this were the case, the `endpointcall_repository` would handle this concern. + + >>> endpoint_call = EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") + >>> endpoint_call.as_dict() + {'endpoint': 'foo', 'execution_time': 0.5, 'time': datetime.datetime(2018, 4, 25, 15, 29, 23), 'version': '0.1', 'group_by': 'None', 'ip': '127.0.0.1'} + """ def __init__(self, endpoint, execution_time, time, version, group_by, ip): @@ -20,6 +25,34 @@ def __init__(self, endpoint, execution_time, time, version, group_by, ip): :param version: String denoting the dashboard's version number. :param group_by: String denoting which user is calling the function (?). :param ip: String denoting the IP-address the endpoint call was made from. + + The types entered are checked: + >>> endpoint_call = EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") + >>> EndpointCall(10, 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") + Traceback (most recent call last): + ... + TypeError + >>> EndpointCall("foo", "bar", datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1") + Traceback (most recent call last): + ... 
+        TypeError
+        >>> EndpointCall("foo", 0.5, 10, "0.1", "None", "127.0.0.1")
+        Traceback (most recent call last):
+        ...
+        TypeError
+        >>> EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), 1234, "None", "127.0.0.1")
+        Traceback (most recent call last):
+        ...
+        TypeError
+        >>> EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", 42, "127.0.0.1")
+        Traceback (most recent call last):
+        ...
+        TypeError
+        >>> EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", 42)
+        Traceback (most recent call last):
+        ...
+        TypeError
+        """
         EndpointCall.__check_arg_types(endpoint, execution_time, time, version, group_by, ip)
@@ -30,6 +63,31 @@
         self.group_by = group_by  # string
         self.ip = ip  # string

+    def __repr__(self):
+        """
+        Returns a string representation of this EndpointCall, for easy debugging and logging:
+
+        >>> EndpointCall("foo", 0.5, datetime.strptime("2018-04-25 15:29:23", "%Y-%m-%d %H:%M:%S"), "0.1", "None", "127.0.0.1")
+        <EndpointCall
+         endpoint=foo
+         execution_time=0.5
+         time=2018-04-25 15:29:23
+         version=0.1
+         group_by=None
+         ip=127.0.0.1
+        >
+        """
+        return f'''<{self.__class__.__name__}
+ endpoint={self.endpoint}
+ execution_time={self.execution_time}
+ time={self.time}
+ version={self.version}
+ group_by={self.group_by}
+ ip={self.ip}
+>'''
+
+
     @staticmethod
     def __check_arg_types(endpoint, execution_time, time, version, group_by, ip):
         if not isinstance(endpoint, str):
diff --git a/pydash/pydash_app/dashboard/repository.py b/pydash/pydash_app/dashboard/repository.py
new file mode 100644
index 00000000..ea42955e
--- /dev/null
+++ b/pydash/pydash_app/dashboard/repository.py
@@ -0,0 +1,68 @@
+"""
+This module handles the persistence of `Dashboard` entities:
+
+It is an adapter of the actual persistence layer, to insulate the application
+from datastore-specific details.
+
+It handles a subset of the following tasks
+(specifically, it only actually contains functions for the tasks the application needs in its current state!):
+- Creating new entities of the specified type.
+- Finding them based on certain attributes.
+- Persisting updated versions of existing entities.
+- Deleting entities from the persistence layer.
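This task list describes the standard repository pattern: the rest of the application only sees find/all/add/update, never ZODB itself. Under that contract the datastore could be swapped for something as simple as a dictionary; a hypothetical in-memory stand-in, for illustration only:

```
import uuid

class InMemoryDashboardRepository:
    """Hypothetical drop-in with the same surface as this module, minus ZODB."""
    def __init__(self):
        self._by_id = {}

    def find(self, dashboard_id):
        if not isinstance(dashboard_id, uuid.UUID):
            dashboard_id = uuid.UUID(dashboard_id)  # accept strings too, like find() below
        return self._by_id[dashboard_id]            # raises KeyError when absent

    def all(self):
        return self._by_id.values()

    def add(self, dashboard):
        self._by_id[dashboard.id] = dashboard

    def update(self, dashboard):
        self._by_id[dashboard.id] = dashboard       # trivial for a plain dict

    def clear_all(self):
        self._by_id.clear()
```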
+""" +import uuid +import BTrees.OOBTree +import transaction +from pydash_database import database_root, MultiIndexedPersistentCollection + + +if not hasattr(database_root(), 'dashboards'): + print("CREATING DASHBOARDS OBJECT") + transaction.begin() + database_root().dashboards = MultiIndexedPersistentCollection({'id'}) + transaction.commit() + + +def find(dashboard_id): + # Ensure that this is also callable with strings or integers: + if not isinstance(dashboard_id, uuid.UUID): + dashboard_id = uuid.UUID(dashboard_id) + print(f"Starting to look for dashboard {dashboard_id}") + + try: + res = database_root().dashboards['id', dashboard_id] + print(f"FOUND DASHBOARD in find_dashboard: {res}") + return res + except Exception as e: + print(f"EXCEPTION: {e}") + raise + + +def all(): + return database_root().dashboards.values() + + +def add(dashboard): + try: + database_root().dashboards.add(dashboard) + transaction.commit() + except KeyError: + transaction.abort() + raise + + +def update(dashboard): + # Update item itself: + transaction.commit() + + # Update indexes for item: + for attempt in transaction.manager.attempts(): + with attempt: + database_root().dashboards.update_item(dashboard) + transaction.begin() + +def clear_all(): + transaction.begin() + database_root().dashboards = MultiIndexedPersistentCollection({'id'}) + transaction.commit() diff --git a/pydash/pydash_app/dashboard/services/__init__.py b/pydash/pydash_app/dashboard/services/__init__.py new file mode 100644 index 00000000..d1a44322 --- /dev/null +++ b/pydash/pydash_app/dashboard/services/__init__.py @@ -0,0 +1,7 @@ +""" +Contains services for the 'Dashboard' concern. + +These are things that use or manipulate 'Dashboard' entities to perform tasks, +where these tasks are either too complex to put in the Dashboard Entity, +or where these are heavily interacting with outside logic that the business domain entity should not concern itself with directly. +""" diff --git a/pydash/pydash_app/dashboard/services/fetching.py b/pydash/pydash_app/dashboard/services/fetching.py new file mode 100644 index 00000000..28a8bdb2 --- /dev/null +++ b/pydash/pydash_app/dashboard/services/fetching.py @@ -0,0 +1,216 @@ +from functools import partial +from datetime import datetime, timedelta, timezone + +import pydash_database +import flask_monitoring_dashboard_client +from pydash_app.dashboard.endpoint import Endpoint +from pydash_app.dashboard.endpoint_call import EndpointCall +import pydash_app.dashboard.repository as dashboard_repository +import periodic_tasks + + +def schedule_all_periodic_dashboards_tasks( + interval=timedelta(hours=1), + scheduler=periodic_tasks.default_task_scheduler): + """ + Sets up all tasks that should be run periodically for each of the dashboards. + (For now, that is only the EndpointCall fetching task.) + + """ + for dashboard in dashboard_repository.all(): + if dashboard.last_fetch_time is None: + schedule_historic_dashboard_fetching( + dashboard, scheduler=scheduler) + else: + schedule_periodic_dashboard_fetching( + dashboard, interval=interval, scheduler=scheduler) + + +def schedule_periodic_dashboard_fetching( + dashboard, + interval=timedelta(hours=1), + scheduler=periodic_tasks.default_task_scheduler): + """ + Schedules the periodic EndpointCall fetching task for this dashboard. 
+ """ + print(f'Creating periodic fetching task for {dashboard}') + periodic_tasks.add_periodic_task( + name=("dashboard", dashboard.id, "fetching"), + task=partial(fetch_and_update_new_dashboard_info, dashboard.id), + interval=interval, + scheduler=scheduler) + + +def schedule_historic_dashboard_fetching( + dashboard, scheduler=periodic_tasks.default_task_scheduler): + """ + Schedules the fetching of historic EndpointCall information as a background task. + The periodic fetching of new EndpointCall information is scheduled as soon as this task completes. + """ + + def task(dashboard_id): + fetch_and_update_historic_dashboard_info(dashboard_id) + schedule_periodic_dashboard_fetching(dashboard_id) + + periodic_tasks.add_background_task( + name=("dashboard", dashboard.id, "historic_fetching"), + task=partial(task, dashboard.id), + scheduler=scheduler) + + +def fetch_and_update_new_dashboard_info(dashboard_id): + """ + Updates the dashboard with the new EndpointCall information that is fetched from the Dashboard's remote location. + """ + dashboard = dashboard_repository.find(dashboard_id) + print("INSIDE FETCH FUNCTION") + fetch_and_add_endpoint_calls(dashboard) + + print(f'- {len(dashboard.endpoints)} endpoints found') + print(f'- {len(dashboard._endpoint_calls)} endpoint calls') + + dashboard_repository.update(dashboard) + + print(f'- {len(dashboard.endpoints)} endpoints found') + print(f'- {len(dashboard._endpoint_calls)} endpoint calls') + + print(f"Dashboard {dashboard_id} updated.") + + +def fetch_and_update_historic_dashboard_info(dashboard_id): + """ + Updates the dashboard with the historic EndpointCall information that is fetched from the Dashboard's remote location. + """ + dashboard = dashboard_repository.find(dashboard_id) + print("INSIDE INITIAL DASHBOARD FETCHING FUNCTION") + fetch_and_add_endpoints(dashboard) + fetch_and_add_historic_endpoint_calls(dashboard) + + print(f'- {len(dashboard.endpoints)} endpoints found') + print(f'- {len(dashboard._endpoint_calls)} historical endpoint calls') + + dashboard_repository.update(dashboard) + + print(f'- {len(dashboard.endpoints)} endpoints found') + print(f'- {len(dashboard._endpoint_calls)} historical endpoint calls') + + +def fetch_and_add_endpoints(dashboard): + """ + For a given dashboard, initialize it with the endpoints it has registered. + Note that this will not add endpoint call data. + :param dashboard: The dashboard to initialize with endpoints. + """ + + endpoints = _fetch_endpoints(dashboard) + + for endpoint in endpoints: + dashboard.add_endpoint(endpoint) + + +def _fetch_endpoints(dashboard): + """ + Fetches and returns a list of `Endpoint`s in the given dashboard. + :param dashboard: The dashboard for which to fetch endpoints. + :return: A list of `Endpoint`s for the dashboard. + """ + + monitor_rules = flask_monitoring_dashboard_client.get_monitor_rules( + dashboard.url, dashboard.token) + + if monitor_rules is None: + return [] + + return [ + Endpoint(rule['endpoint'], rule['monitor']) for rule in monitor_rules + ] + + +def fetch_and_add_historic_endpoint_calls(dashboard): + """ + For a given dashboard, retrieve all historical endpoint calls and add them to it. + :param dashboard: The dashboard to initialize with historical data. 
+ """ + + # Only run this function if no periodic fetching of latest information has happened yet: + if dashboard.last_fetch_time is not None: + return + + details = flask_monitoring_dashboard_client.get_details(dashboard.url) + first_request = int(details['first_request']) + + start_time = datetime.fromtimestamp(first_request, tz=timezone.utc) + current_time = datetime.now(timezone.utc) + + while start_time < current_time: + # TODO: for now historical data is pulled in chunks of 1 hour (hardcoded) + end_time = start_time + timedelta(hours=1) + + if end_time > current_time: + end_time = current_time + + endpoint_calls = _fetch_endpoint_calls(dashboard, start_time, end_time) + + if endpoint_calls is None: + continue + + for call in endpoint_calls: + dashboard.add_endpoint_call(call) + dashboard.last_fetch_time = call.time + + start_time = end_time + + +def fetch_and_add_endpoint_calls(dashboard): + """ + Retrieve the latest endpoint calls of the given dashboard and add them to it. + :param dashboard: The dashboard for which to update endpoint calls. + """ + print(f"Updating endpoint calls for dashboard: {dashboard}") + + # Only run this function if historic fetching has happened. + if dashboard.last_fetch_time is None: + return + + new_calls = _fetch_endpoint_calls( + dashboard, time_from=dashboard.last_fetch_time) + print(f"New endpoint calls: {new_calls}") + + if new_calls is []: + return [] + + for call in new_calls: + dashboard.add_endpoint_call(call) + + dashboard.last_fetch_time = new_calls[-1].time + print(f"Saved to database: dashboard {dashboard}") + + +def _fetch_endpoint_calls(dashboard, time_from=None, time_to=None): + """ + Fetches and returns a list of `EndpointCall`s for the given dashboard. + :param dashboard: The dashboard for which to fetch endpoint calls. + :param time_from: An operiodic_tasksional datetime indicating only data since that moment should be returned. + :param time_to: An operiodic_tasksional datetime indicating only data up to that point should be returned. + :return: A list of `EndpointCall`s containing the endpoint call data for this dashboard. + """ + + endpoint_requests = flask_monitoring_dashboard_client.get_data( + dashboard.url, dashboard.token, time_from, time_to) + + if endpoint_requests is None: + return [] + + endpoint_calls = [] + for request in endpoint_requests: + # The raw endpoint call data contains a timestamp formatted + # as "yyyy-mm-dd hh:mm:ss.micro" so we need to parse it + time = datetime.strptime(request['time'], '%Y-%m-%d %H:%M:%S.%f') + time.replace(tzinfo=timezone.utc) + + call = EndpointCall(request['endpoint'], request['execution_time'], + time, request['version'], request['group_by'], + request['ip']) + endpoint_calls.append(call) + + return endpoint_calls diff --git a/pydash/pydash_app/dashboard/services/seeding.py b/pydash/pydash_app/dashboard/services/seeding.py new file mode 100644 index 00000000..e85f7898 --- /dev/null +++ b/pydash/pydash_app/dashboard/services/seeding.py @@ -0,0 +1,57 @@ +""" +Fills the application with some preliminary dashboards +to make it easier to test code in development and staging environments. +""" + + +from pydash_app.dashboard.dashboard import Dashboard +import pydash_app.dashboard.repository as repository +import pydash_app.user.repository as user_repository + +import pydash_app.dashboard.services.fetching as fetching + +def seed(): + """ + For each user, stores some preliminary debug dashboards in the datastore, + to be used during development. 
+ Note: for now the two dashboards that are being returned are + identical, apart from the url. + """ + + # Clear current Dashboards-DB. + repository.clear_all() + + # # Fill in dashboards. + # _dev_dashboard_urls = ['http://pydash.io/', 'http://pystach.io/'] + # _dev_endpoint_calls = [EndpointCall("foo", 0.5, datetime.now(), "0.1", "None", "127.0.0.1"), + # EndpointCall("foo", 0.1, datetime.now(), "0.1", "None", "127.0.0.2"), + # EndpointCall("bar", 0.2, datetime.now(), "0.1", "None", "127.0.0.1"), + # EndpointCall("bar", 0.2, datetime.now() - timedelta(days=1), "0.1", "None", "127.0.0.1"), + # EndpointCall("bar", 0.2, datetime.now() - timedelta(days=2), "0.1", "None", "127.0.0.1") + # ] + # + # # Instead of storing the endpoints in a list, we generate them on the fly, + # # to avoid users sharing the same endpoints for now, as we'd like to have a controlled environment for every user + # # during this stage of development. + # for user in user_repository.all(): + # for url in _dev_dashboard_urls: + # dashboard = Dashboard(url, user.get_id()) + # for endpoint in [Endpoint("foo", True), Endpoint("bar", True)]: + # dashboard.add_endpoint(endpoint) + # for endpoint_call in _dev_endpoint_calls: + # dashboard.add_endpoint_call(endpoint_call) + # print(f'Adding dashboard {dashboard}') + # add(dashboard) + + # TEST + # from pydash_app.fetching.fetching import fetch_and_add_endpoints, fetch_and_add_historic_endpoint_calls + # for user in [user_repository.find_by_name('W-M'), user_repository.find_by_name('Koen')]: + for user in user_repository.all(): + dashboard = Dashboard("http://136.243.248.188:9001/dashboard", + "cc83733cb0af8b884ff6577086b87909", + user.get_id()) + print(f'Adding dashboard {dashboard}') + repository.add(dashboard) + print(f'Fetching remote info for dashboard {dashboard}.') + fetching.fetch_and_update_historic_dashboard_info(dashboard.id) + print('Seeding of dashboards is done!') diff --git a/pydash/pydash_app/fetching/__init__.py b/pydash/pydash_app/fetching/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pydash/pydash_app/fetching/dashboard_fetch.py b/pydash/pydash_app/fetching/dashboard_fetch.py deleted file mode 100644 index 25e7320b..00000000 --- a/pydash/pydash_app/fetching/dashboard_fetch.py +++ /dev/null @@ -1,234 +0,0 @@ -from functools import partial -from datetime import datetime, timedelta, timezone - -import pydash_app.impl.database -from pydash_app.impl.fetch import get_monitor_rules, get_data, get_details -from pydash_app.dashboard.endpoint import Endpoint -from pydash_app.dashboard.endpoint_call import EndpointCall -from pydash_app.dashboard.dashboard_repository import find as find_dashboard, update as update_dashboard - -import pydash_app.impl.periodic_tasks as pt - - -def start_default_scheduler(): - pt.start_default_scheduler() - - -def initialize_dashboard_fetching(dashboard, interval=timedelta(hours=1), scheduler=pt.default_task_scheduler): - """ - Initialize a dashboard from its remote endpoints and add it to the scheduler, with the given interval. - This also fetches and stores all historical data up to this point in time. - :param dashboard: The Dashboard to initialize. - :param interval: The interval at which the endpoints should be fetched. This should be a datetime.timedelta object. - Defaults to 1 hour. - :param scheduler: The scheduler to add the fetch calls to. - Defaults to the default TaskScheduler provided in the pydash_app.impl.periodic_tasks package. 
- """ - - def initialize_dashboard(dashboard): - initialize_endpoints(dashboard) - initialize_endpoint_calls(dashboard) - - pt.add_background_task(name=_dashboard_init_task_name(dashboard), - task=partial(initialize_dashboard, dashboard), - scheduler=scheduler - ) - - _add_dashboard_to_fetch_from(dashboard, interval, scheduler) - - -def update_dashboard_fetching_interval(dashboard, interval=timedelta(hours=1), scheduler=pt.default_task_scheduler): - """ - Update the interval of the fetching of a dashboard w.r.t the scheduler, with the given interval. - :param dashboard: The Dashboard in question. - :param interval: The interval at which the endpoints should be fetched. This should be a datetime.timedelta object. - Defaults to 1 hour. - :param scheduler: The scheduler to update the interval of the fetching of. - Defaults to the default TaskScheduler provided in the pydash_app.impl.periodic_tasks package. - - NOTE: If the fetching of the dashboard has not yet been registered with the given scheduler, - this method will simply add the fetching of the dashboard data to the scheduler without initialising it with - remote endpoints nor seeding it with historic data. - """ - _add_dashboard_to_fetch_from(dashboard, interval, scheduler) - -def schedule_periodic_dashboard_fetching(interval=timedelta(hours=1), scheduler=pt.default_task_scheduler): - for dashboard in pydash_app.dashboard.dashboard_repository.all(): - print(f'Creating periodic task for {dashboard}') - _add_dashboard_to_fetch_from(dashboard=dashboard, interval=timedelta(seconds=5)) - - -def _add_dashboard_to_fetch_from(dashboard, interval=timedelta(hours=1), scheduler=pt.default_task_scheduler): - """ - Adds the fetching of data from `endpoint` of `dashboard` to the given scheduler. - :param dashboard: The Dashboard this Endpoint belongs to. - :param interval: The datetime.timedelta object indicating the interval of the fetching. - Defaults to 1 hour. - :param scheduler: The TaskScheduler we want to add this Endpoint-fetching to. - Defaults to the default scheduler that is provided in the pydash_app.impl.periodic_tasks package. - """ - - pt.add_periodic_task(name=_dashboard_fetch_task_name(dashboard), - interval=interval, - task=partial(_update_endpoint_calls_task, str(dashboard.id)), - scheduler=scheduler - ) - - -def _remove_dashboard_to_fetch_from(dashboard, scheduler=pt.default_task_scheduler): - """ - Removes an endpoint from the scheduler for that specific dashboard. - :param dashboard: The Dashboard the endpoint belongs to. - :param scheduler: The TaskScheduler to remove it from. - Defaults to the default scheduler that is provided in the pydash_app.impl.periodic_tasks package. - """ - pt.remove_task(name=_dashboard_fetch_task_name(dashboard), scheduler=scheduler) - - -def _dashboard_fetch_task_name(dashboard): - return f'fetch_{dashboard.id}' - - -def _dashboard_init_task_name(dashboard): - return f'init_{dashboard.id}' - - -def initialize_endpoints(dashboard): - """ - For a given dashboard, initialize it with the endpoints it has registered. - Note that this will not add endpoint call data. - :param dashboard: The dashboard to initialize with endpoints. - """ - - endpoints = _fetch_endpoints(dashboard) - - if endpoints is None: - return - - for endpoint in endpoints: - dashboard.add_endpoint(endpoint) - - update_dashboard(dashboard) - - -def _fetch_endpoints(dashboard): - """ - Fetches and returns a list of `Endpoint`s in the given dashboard. - :param dashboard: The dashboard for which to fetch endpoints. 
- :return: A list of `Endpoint`s for the dashboard. - """ - - monitor_rules = get_monitor_rules(dashboard.url, dashboard.token) - - if monitor_rules is None: - return None - - return [Endpoint(rule['endpoint'], rule['monitor']) for rule in monitor_rules] - - -def initialize_endpoint_calls(dashboard): - """ - For a given dashboard, retrieve all historical endpoint calls and store them in the database. - :param dashboard: The dashboard to initialize with historical data. - """ - - if dashboard.last_fetch_time is not None: - return - - details = get_details(dashboard.url) - first_request = int(details['first_request']) - - start_time = datetime.fromtimestamp(first_request, tz=timezone.utc) - current_time = datetime.now(timezone.utc) - - while start_time < current_time: - # TODO: for now historical data is pulled in chunks of 1 hour (hardcoded) - end_time = start_time + timedelta(hours=1) - - if end_time > current_time: - end_time = current_time - - endpoint_calls = _fetch_endpoint_calls(dashboard, start_time, end_time) - - if endpoint_calls is None: - continue - - for call in endpoint_calls: - dashboard.add_endpoint_call(call) - dashboard.last_fetch_time = call.time - - start_time = end_time - - update_dashboard(dashboard) - - -def _update_endpoint_calls_task(dashboard_id): - """ - Function to be used as a periodic task to update endpoints. - :param dashboard_id: The id of the dashboard to update. - """ - print("update endpoint calls task starting...") - - # pydash_app.impl.database.initialize_db_connection() - dashboard = find_dashboard(dashboard_id) - print(f"FOUND DASHBOARD {dashboard}") - update_endpoint_calls(dashboard) - print("update endpoint calls task ending...") - - -def update_endpoint_calls(dashboard): - """ - Retrieve the latest endpoint calls of the given dashboard and store them in the database. - :param dashboard: The dashboard for which to update endpoint calls. - """ - print(f"Updating endpoint calls for dashboard: {dashboard}") - - if dashboard.last_fetch_time is None: - return - - new_calls = _fetch_endpoint_calls(dashboard, time_from=dashboard.last_fetch_time) - print(f"New endpoint calls: {new_calls}") - - if new_calls is None: - return - - for call in new_calls: - dashboard.add_endpoint_call(call) - - dashboard.last_fetch_time = new_calls[-1].time - update_dashboard(dashboard) - print(f"Saved to database: dashboard {dashboard}") - - -def _fetch_endpoint_calls(dashboard, time_from=None, time_to=None): - """ - Fetches and returns a list of `EndpointCall`s for the given dashboard. - :param dashboard: The dashboard for which to fetch endpoint calls. - :param time_from: An optional datetime indicating only data since that moment should be returned. - :param time_to: An optional datetime indicating only data up to that point should be returned. - :return: A list of `EndpointCall`s containing the endpoint call data for this dashboard. 
- """ - - endpoint_requests = get_data(dashboard.url, dashboard.token, time_from, time_to) - - if endpoint_requests is None: - return None - - endpoint_calls = [] - for request in endpoint_requests: - # The raw endpoint call data contains a timestamp formatted - # as "yyyy-mm-dd hh:mm:ss.micro" so we need to parse it - time = datetime.strptime(request['time'], '%Y-%m-%d %H:%M:%S.%f') - time.replace(tzinfo=timezone.utc) - - call = EndpointCall( - request['endpoint'], - request['execution_time'], - time, - request['version'], - request['group_by'], - request['ip'] - ) - endpoint_calls.append(call) - - return endpoint_calls diff --git a/pydash/pydash_app/impl/__init__.py b/pydash/pydash_app/impl/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pydash/pydash_app/user/__init__.py b/pydash/pydash_app/user/__init__.py index cf80e597..67c04b07 100644 --- a/pydash/pydash_app/user/__init__.py +++ b/pydash/pydash_app/user/__init__.py @@ -1,17 +1,58 @@ """ This module is the public interface (available to the web-application pydash_web) for interacting with Users. + + +Example Usage: + +>>> gandalf = User("Gandalf", "pass") +>>> add_to_repository(gandalf) +... +>>> found_user = find(gandalf.id) +>>> found_user.name == "Gandalf" +True + +You can also use a string-version of the ID to find the user again: + +>>> found_user = find(str(gandalf.id)) +>>> found_user.name == "Gandalf" +True + + +>>> found_user2 = find_by_name("Gandalf") +>>> found_user2 == found_user +True +>>> find_by_name("Dumbledore") +>>> # ^Returns nothing +>>> res_user = authenticate("Gandalf", "pass") +>>> res_user.name == "Gandalf" +True +>>> authenticate("Gandalf", "youshallnot") +>>> # ^Returns nothing +>>> authenticate("Dumbledore", "secrets") +>>> # ^Returns nothing """ from .user import User -import pydash_app.user.user_repository +import pydash_app.user.repository def add_to_repository(user): """ Adds the given User-entity to the user_repository. :param user: The User-entity in question. + + Adding the same user twice with the same name is not allowed: + + >>> gandalf1 = User("Gandalf", "pass") + >>> add_to_repository(gandalf1) + >>> gandalf2 = User("Gandalf", "balrog") + >>> add_to_repository(gandalf2) + Traceback (most recent call last): + ... + multi_indexed_collection.DuplicateIndexError + """ - user_repository.add(user) + repository.add(user) def find(user_id): @@ -19,14 +60,26 @@ def find(user_id): Returns a single User-entity with the given UUID or None if it could not be found. user_id- UUID of the user we hope to find.""" - return user_repository.find(user_id) + return repository.find(user_id) def maybe_find_user(user_id): """ Returns the User entity, or `None` if it does not exist. + + >>> user = User("Gandalf", "pass") + >>> add_to_repository(user) + ... + >>> found_user = maybe_find_user(user.id) + >>> found_user.name == "Gandalf" + True + >>> import uuid + >>> unexistent_uuid = uuid.UUID('ced84534-7a55-440f-ad77-9912466fe022') + >>> unexistent_user = maybe_find_user(unexistent_uuid) + >>> unexistent_user == None + True """ try: - return pydash_app.user.find(user_id) + return find(user_id) except KeyError: return None @@ -36,7 +89,7 @@ def find_by_name(name): name -- Name of the user we hope to find. 
""" - return user_repository.find_by_name(name) + return repository.find_by_name(name) def authenticate(name, password): diff --git a/pydash/pydash_app/user/user_repository.py b/pydash/pydash_app/user/repository.py similarity index 52% rename from pydash/pydash_app/user/user_repository.py rename to pydash/pydash_app/user/repository.py index a357a795..9dfdd503 100644 --- a/pydash/pydash_app/user/user_repository.py +++ b/pydash/pydash_app/user/repository.py @@ -14,18 +14,22 @@ import uuid import BTrees.OOBTree import transaction -from ..impl.database import database_root, MultiIndexedPersistentCollection +from pydash_database import database_root, MultiIndexedPersistentCollection +from multi_indexed_collection import DuplicateIndexError from .user import User if not hasattr(database_root(), 'users'): + transaction.begin() database_root().users = MultiIndexedPersistentCollection({'id', 'name'}) + transaction.commit() def find(user_id): # Ensure that also callable with strings or integers: - user_id = uuid.UUID(user_id) + if not isinstance(user_id, uuid.UUID): + user_id = uuid.UUID(user_id) return database_root().users['id', user_id] @@ -40,49 +44,59 @@ def find_by_name(name): def all(): + """ + Returns a (lazy) collection of all users (in no guaranteed order). + + >>> list(all()) + [] + >>> gandalf = User("Gandalf", "pass") + >>> dumbledore = User("Dumbledore", "secret") + >>> add(gandalf) + >>> add(dumbledore) + >>> sorted([user.name for user in all()]) + ['Dumbledore', 'Gandalf'] + >>> clear_all() + >>> sorted([user.name for user in all()]) + [] + """ return database_root().users.values() def add(user): try: + transaction.begin() database_root().users.add(user) transaction.commit() - except KeyError: + except (KeyError, DuplicateIndexError): transaction.abort() raise def update(user): - try: - database_root().users.update_item(user) - transaction.commit() - except KeyError: - transaction.abort() - raise + """ + Changes the user's information -def seed_users(): - """ - Stores some preliminary debug users in the datastore, - to be used during development. + >>> gandalf = User("GandalfTheGrey", "pass") + >>> add(gandalf) + >>> gandalf.name = "GandalfTheWhite" + >>> update(gandalf) + >>> find_by_name("GandalfTheGrey") == gandalf + False + >>> find_by_name("GandalfTheWhite") == gandalf + True + """ + transaction.commit() + for attempt in transaction.manager.attempts(): + with attempt: + database_root().users.update_item(user) + transaction.begin() - # Clear current DB. +def clear_all(): + """ + Flushes the database. + """ + transaction.begin() database_root().users = MultiIndexedPersistentCollection({'id', 'name'}) - - # Fill in users. - _development_users = [ - User(name="Alberto", password="alberto"), - User(name="Arjan", password="arjan"), - User(name="JeroenO", password="jeroeno"), - User(name="JeroenL", password="jeroenl"), - User(name="Koen", password="koen"), - User(name="Lars", password="lars"), - User(name="Patrick", password="patrick"), - User(name="Tom", password="tom"), - User(name="W-M", password="topsecret") - ] - for user in _development_users: - print("Adding user {}".format(user)) - add(user) - print("Seeding of users is done!") + transaction.commit() diff --git a/pydash/pydash_app/user/services/__init__.py b/pydash/pydash_app/user/services/__init__.py new file mode 100644 index 00000000..f3a74c93 --- /dev/null +++ b/pydash/pydash_app/user/services/__init__.py @@ -0,0 +1,7 @@ +""" +Contains services for the 'User' concern. 
diff --git a/pydash/pydash_app/user/services/__init__.py b/pydash/pydash_app/user/services/__init__.py
new file mode 100644
index 00000000..f3a74c93
--- /dev/null
+++ b/pydash/pydash_app/user/services/__init__.py
@@ -0,0 +1,7 @@
+"""
+Contains services for the 'User' concern.
+
+These are things that use or manipulate 'User' entities to perform tasks
+that are either too complex to put in the User entity itself,
+or that interact so heavily with outside logic that the business domain entity should not concern itself with them directly.
+"""
diff --git a/pydash/pydash_app/user/services/seeding.py b/pydash/pydash_app/user/services/seeding.py
new file mode 100644
index 00000000..e911b4fe
--- /dev/null
+++ b/pydash/pydash_app/user/services/seeding.py
@@ -0,0 +1,34 @@
+"""
+Fills the application with some preliminary users
+to make it easier to test code in development and staging environments.
+"""
+
+from pydash_app.user.user import User
+import pydash_app.user.repository as repository
+
+
+def seed():
+    """
+    Stores some preliminary debug users in the datastore,
+    to be used during development.
+    """
+
+    # Clear current DB.
+    repository.clear_all()
+
+    # Fill in users.
+    _development_users = [
+        User(name="Alberto", password="alberto"),
+        User(name="Arjan", password="arjan"),
+        User(name="JeroenO", password="jeroeno"),
+        User(name="JeroenL", password="jeroenl"),
+        User(name="Koen", password="koen"),
+        User(name="Lars", password="lars"),
+        User(name="Patrick", password="patrick"),
+        User(name="Tom", password="tom"),
+        User(name="W-M", password="topsecret")
+    ]
+    for user in _development_users:
+        print("Adding user {}".format(user))
+        repository.add(user)
+    print("Seeding of users is done!")
diff --git a/pydash/pydash_app/user/user.py b/pydash/pydash_app/user/user.py
index bdafa2d2..7fe4ac86 100644
--- a/pydash/pydash_app/user/user.py
+++ b/pydash/pydash_app/user/user.py
@@ -14,6 +14,14 @@ class User(persistent.Persistent, flask_login.UserMixin):
     Per Domain Driven Design, it does _not_ contain information on how to persistently store/load a user!
     (That is instead handled by the `user_repository`).
+
+
+    The User entity checks its parameters on creation:
+
+    >>> User(42, 32)
+    Traceback (most recent call last):
+    ...
+    TypeError
     """
 
     def __init__(self, name, password):
@@ -25,6 +33,13 @@ def __init__(self, name, password):
         self.password_hash = generate_password_hash(password)
 
     def __repr__(self):
+        """
+        The user has a string representation to be easily introspectable:
+
+        >>> user = User("Gandalf", "pass")
+        >>> f"{user}".startswith("<User id=")
+        True
+        """
         return '<{} id={} name={}>'.format(self.__class__.__name__, self.id, self.name)
 
     def get_id(self):
@@ -36,4 +51,17 @@ def check_password(self, password):
 
     # Required because `multi_indexed_collection` puts users in a set, that needs to order its keys for fast lookup.
     # Because the IDs are unchanging integer values, use that.
     def __lt__(self, other):
+        """
+        Users are ordered. This is a requirement because the persistence layer will store them in a dictionary with ordered keys.
+
+        The actual order does not matter, as long as the same object always has the same location.
+        Therefore, we use the UUIDs for this.
+
+        >>> gandalf = User("Gandalf", "pass")
+        >>> dumbledore = User("Dumbledore", "secret")
+        >>> gandalf < dumbledore or gandalf > dumbledore
+        True
+        >>> gandalf < gandalf
+        False
+        """
         return self.id < other.id
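
Note that User stores only a salted werkzeug digest, never the plaintext password. A standalone illustration of the primitives underneath __init__ and check_password, assuming nothing beyond werkzeug itself:

    from werkzeug.security import generate_password_hash, check_password_hash

    password_hash = generate_password_hash("pass")  # Salted, one-way digest.
    assert check_password_hash(password_hash, "pass")
    assert not check_password_hash(password_hash, "youshallnot")
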
diff --git a/pydash/pydash_app/impl/database.py b/pydash/pydash_database/__init__.py
similarity index 62%
rename from pydash/pydash_app/impl/database.py
rename to pydash/pydash_database/__init__.py
index 818b799a..e54a1787 100644
--- a/pydash/pydash_app/impl/database.py
+++ b/pydash/pydash_database/__init__.py
@@ -4,11 +4,12 @@
 import persistent
 import BTrees.OOBTree
 from multi_indexed_collection import MultiIndexedCollection
+import transaction
 
 
 def _decide_database_address():
-    if os.environ.get("TEST"):
+    if "TESTING" in os.environ:
         address, stop = ZEO.server()  # <- in-memory server that's gone after closing of process.
         return address
     else:
@@ -16,9 +17,23 @@ def _decide_database_address():
 
 _database_address = _decide_database_address()
-_database_root = None
+_database_connection = None
 _current_process_id = None
 
+
+def database_connection():
+    global _database_connection
+    global _current_process_id
+    if not _database_connection or os.getpid() != _current_process_id:
+        _database_connection = ZEO.connection(_database_address)
+        _current_process_id = os.getpid()
+        transaction.begin()
+        # print(f"PID {os.getpid()}: Created new DB connection: {_database_connection} connecting to {_database_address}")
+    # else:
+    #     print(f"PID {os.getpid()}: Returning old DB connection {_database_connection}")
+    return _database_connection
+
 
 def database_root():
     """
     Returns the ZEO database root object.
@@ -26,17 +41,7 @@
     on each multiprocessing.Process.
     (on all subsequent calls on this process, the connection is re-used.)
     """
-    global _database_root
-    global _current_process_id
-    if not _database_root or os.getpid() != _current_process_id:
-        _connection = ZEO.connection(_database_address)
-        _database_root = _connection.root
-        _current_process_id = os.getpid()
-        print(f"PID {os.getpid()}: Created new DB connection: {_connection} connecting to {_database_address} {_database_root}")
-    else:
-
-        print(f"PID {os.getpid()}: returning old connection {_database_root}")
-    return _database_root
+    return database_connection().root
 
 
 class MultiIndexedPersistentCollection(MultiIndexedCollection, persistent.Persistent):
     def __init__(self, properties):
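
database_connection() caches exactly one ZEO connection per OS process and reopens it after a fork, since ZODB connections must not be shared between processes (the situation the database_root() docstring describes). A sketch of exercising the module against the throwaway in-memory server; note that TESTING has to be in the environment before the first import, because _database_address is decided at import time:

    import os
    os.environ["TESTING"] = "True"  # Before the import below, or it is too late.

    import transaction
    from pydash_database import database_root

    database_root().greeting = "hello"  # Attributes on the root persist...
    transaction.commit()                # ...once the transaction commits.
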
""" - +import os from flask import Flask from flask_login import LoginManager from flask_cors import CORS from pydash_web.api import api as api_blueprint -from pydash_web.static import static as static_blueprint +from pydash_web.react_server import react_server as react_server_blueprint from config import Config @@ -18,17 +18,24 @@ import pydash_app.user -flask_webapp = Flask(__name__) +flask_webapp = Flask(__name__, +static_folder=None +) flask_webapp.config.from_object(Config) flask_webapp.config['CORS_HEADERS'] = 'Content-Type' cors = CORS(flask_webapp, resources={r"/api/*": {"origins": "*"}}, allow_headers=['Content-Type'], supports_credentials=True) # Only keep this line during development! flask_webapp.register_blueprint(api_blueprint) -flask_webapp.register_blueprint(static_blueprint) +flask_webapp.register_blueprint(react_server_blueprint) pydash_app.schedule_periodic_tasks() -pydash_app.start_task_scheduler() + + +# Don't autostart scheduler in the testing environment. +print(os.environ) +if 'TESTING' not in os.environ: + pydash_app.start_task_scheduler() login_manager = LoginManager(flask_webapp) @login_manager.user_loader diff --git a/pydash/pydash_web/controller/dashboards.py b/pydash/pydash_web/controller/dashboards.py index cb273151..b0c9e43d 100644 --- a/pydash/pydash_web/controller/dashboards.py +++ b/pydash/pydash_web/controller/dashboards.py @@ -8,11 +8,11 @@ from flask_login import current_user import pydash_app.dashboard -import pydash_app.impl.logger as pylog +import pydash_logger -from pydash_app.fetching.dashboard_fetch import update_endpoint_calls, _fetch_endpoint_calls +# from pydash_app.fetching.dashboard_fetch import update_endpoint_calls, _fetch_endpoint_calls -logger = pylog.Logger(__name__) +logger = pydash_logger.Logger(__name__) def dashboard(dashboard_id): @@ -28,9 +28,9 @@ def dashboard(dashboard_id): try: db = pydash_app.dashboard.find(dashboard_id) - logger.debug(f'Amount of newly fetched endpoint calls: {len(_fetch_endpoint_calls(db, db.last_fetch_time))}') + # logger.debug(f'Amount of newly fetched endpoint calls: {len(_fetch_endpoint_calls(db, db.last_fetch_time))}') - update_endpoint_calls(db) + # update_endpoint_calls(db) except KeyError: logger.warning(f"Could not find dashboard matching with {dashboard_id}") return jsonify({"message": "Could not find a matching dashboard."}), 404 diff --git a/pydash/pydash_web/controller/login.py b/pydash/pydash_web/controller/login.py index 6added7d..18376ff0 100644 --- a/pydash/pydash_web/controller/login.py +++ b/pydash/pydash_web/controller/login.py @@ -7,9 +7,9 @@ from flask_restplus.reqparse import RequestParser import pydash_app.user -import pydash_app.impl.logger as pylog +import pydash_logger -logger = pylog.Logger(__name__) +logger = pydash_logger.Logger(__name__) def login(): @@ -58,4 +58,4 @@ def _user_details(user): return { "id": user.id, "username": user.name - } \ No newline at end of file + } diff --git a/pydash/pydash_web/controller/logout.py b/pydash/pydash_web/controller/logout.py index afbf4741..92539dee 100644 --- a/pydash/pydash_web/controller/logout.py +++ b/pydash/pydash_web/controller/logout.py @@ -3,9 +3,9 @@ """ from flask import jsonify from flask_login import current_user, logout_user -import pydash_app.impl.logger as pylog +import pydash_logger -logger = pylog.Logger(__name__) +logger = pydash_logger.Logger(__name__) def logout(): diff --git a/pydash/pydash_web/controller/register_user.py b/pydash/pydash_web/controller/register_user.py index a5e029a7..beea0a20 100644 --- 
diff --git a/pydash/pydash_web/static.py b/pydash/pydash_web/static.py
deleted file mode 100644
index ac54f543..00000000
--- a/pydash/pydash_web/static.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from flask import Blueprint
-
-static = Blueprint('pydash_web.static', __name__,
-                   static_folder='../../pydash-front/build/static',
-                   static_url_path='/static')
-
-import pydash_web.static_routes
\ No newline at end of file
diff --git a/pydash/pydash_web/static_routes.py b/pydash/pydash_web/static_routes.py
deleted file mode 100644
index b7901688..00000000
--- a/pydash/pydash_web/static_routes.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from flask import send_from_directory
-
-from pydash_web.static import static
-
-
-@static.route('/service-worker.js')
-def serve_worker():
-    return send_from_directory('../../pydash-front/build', 'service-worker.js')
-
-
-@static.route('/', defaults={'path': ''})
-@static.route('/<path:path>')
-def serve_react(path):
-    return send_from_directory('../../pydash-front/build', 'index.html')
diff --git a/pydash/pytest.ini b/pydash/pytest.ini
new file mode 100644
index 00000000..0720a2cb
--- /dev/null
+++ b/pydash/pytest.ini
@@ -0,0 +1,5 @@
+[pytest]
+env =
+    TESTING=True
+addopts = --doctest-modules -n auto --cov-report term-missing --cov=pydash_app --cov=periodic_tasks
+doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
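
Two details of this configuration interact with the code above: pytest-env exports TESTING=True into every test process (including the workers spawned by -n auto), which selects the in-memory ZEO server and keeps the task scheduler off, while --doctest-modules collects all of the doctests added in this change. IGNORE_EXCEPTION_DETAIL is what lets a doctest such as the bare TypeError for User(42, 32) assert only the exception type. The equivalent per-example form, with a made-up function for illustration:

    def half(x):
        """
        >>> half("two")  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        TypeError: unsupported operand type(s)
        """
        return x / 2
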