From 374b65560fc1d0895f275e67765226cae9e20e0a Mon Sep 17 00:00:00 2001
From: Kurt Griffiths
Date: Sat, 4 Jan 2020 13:39:04 -0700
Subject: [PATCH] chore: Debug travis hang

---
 .travis.yml                     |  80 +++++++++---------
 tests/asgi/_asgi_test_app.py    | 143 +++++++++++++++++++++++++++++++
 tests/asgi/test_asgi_servers.py | 144 ++------------------------------
 3 files changed, 192 insertions(+), 175 deletions(-)
 create mode 100644 tests/asgi/_asgi_test_app.py

diff --git a/.travis.yml b/.travis.yml
index c27fcfdd9..9949ffff1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,50 +9,50 @@ cache:
 matrix:
   include:
-    - python: pypy3.6-7.1.1
-      env: TOXENV=pypy3
-    - python: 3.8
-      env: TOXENV=pep8
-    - python: 3.8
-      env: TOXENV=pep8-examples
-    - python: 3.8
-      env: TOXENV=mypy
-    # NOTE(kgriffs): 3.5.2 is the default Python 3 version on Ubuntu 16.04
-    # so we pin to that for testing to make sure we are working around
-    # and quirks that were fixed in later micro versions.
-    - python: 3.5.2
-      env: TOXENV=py35
+    # - python: pypy3.6-7.1.1
+    #   env: TOXENV=pypy3
+    # - python: 3.8
+    #   env: TOXENV=pep8
+    # - python: 3.8
+    #   env: TOXENV=pep8-examples
+    # - python: 3.8
+    #   env: TOXENV=mypy
+    # # NOTE(kgriffs): 3.5.2 is the default Python 3 version on Ubuntu 16.04
+    # # so we pin to that for testing to make sure we are working around
+    # # and quirks that were fixed in later micro versions.
+    # - python: 3.5.2
+    #   env: TOXENV=py35
     - python: 3.6
       env: TOXENV=py36
-    - python: 3.6
-      env: TOXENV=py36_cython
-    - python: 3.7
-      env: TOXENV=py37
-    - python: 3.8
-      env: TOXENV=py38
-    - python: 3.8
-      env: TOXENV=py38_cython
-    - python: 3.8
-      # NOTE(vytas): Big-endian architecture
-      arch: s390x
-      os: linux
-      env: TOXENV=py38_cython
-    - python: 3.8
-      env: TOXENV=py38_smoke
-    - python: 3.8
-      env: TOXENV=py38_smoke_cython
-    - python: 3.8
-      env: TOXENV=docs
-    # TODO(kgriffs): Re-enable once hug has a chance to address
-    # breaking changes in Falcon 3.0
-    # - python: 3.7
-    #   env: TOXENV=hug
-    - python: 3.8
-      env: TOXENV=look
-    - python: 3.8
-      env: TOXENV=check_vendored
+    # - python: 3.6
+    #   env: TOXENV=py36_cython
+    # - python: 3.7
+    #   env: TOXENV=py37
+    # - python: 3.8
+    #   env: TOXENV=py38
+    # - python: 3.8
+    #   env: TOXENV=py38_cython
+    # - python: 3.8
+    #   # NOTE(vytas): Big-endian architecture
+    #   arch: s390x
+    #   os: linux
+    #   env: TOXENV=py38_cython
+    # - python: 3.8
+    #   env: TOXENV=py38_smoke
+    # - python: 3.8
+    #   env: TOXENV=py38_smoke_cython
+    # - python: 3.8
+    #   env: TOXENV=docs
+    # # TODO(kgriffs): Re-enable once hug has a chance to address
+    # # breaking changes in Falcon 3.0
+    # # - python: 3.7
+    # #   env: TOXENV=hug
+    # - python: 3.8
+    #   env: TOXENV=look
+    # - python: 3.8
+    #   env: TOXENV=check_vendored

-script: tox -- -v
+script: tox -- -sv -k test_asgi_servers

 notifications:
   webhooks:
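
Note: the narrowed script line above forwards -sv -k test_asgi_servers through tox to pytest (assuming the project's tox.ini passes positional args on to pytest, which the change implies). A minimal sketch for reproducing the same test selection locally without tox; the paths and flags mirror the CI line and are otherwise illustrative:

    # Hypothetical local reproduction of the CI invocation above.
    # Assumes pytest is installed and the working directory is the repo root.
    import sys

    import pytest

    # -s: don't capture output, -v: verbose, -k: keyword filter
    sys.exit(pytest.main(['-sv', '-k', 'test_asgi_servers', 'tests/asgi']))
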
diff --git a/tests/asgi/_asgi_test_app.py b/tests/asgi/_asgi_test_app.py
new file mode 100644
index 000000000..4612dbe29
--- /dev/null
+++ b/tests/asgi/_asgi_test_app.py
@@ -0,0 +1,143 @@
+import asyncio
+from collections import Counter
+import os
+import time
+
+import falcon
+import falcon.asgi
+import falcon.util
+
+
+class Things:
+    def __init__(self):
+        self._counter = Counter()
+
+    async def on_get(self, req, resp):
+        await asyncio.sleep(0.01)
+        resp.body = req.remote_addr
+
+    async def on_post(self, req, resp):
+        resp.data = await req.stream.read(req.content_length or 0)
+        resp.set_header('X-Counter', str(self._counter['backround:things:on_post']))
+
+        async def background_job_async():
+            await asyncio.sleep(0.01)
+            self._counter['backround:things:on_post'] += 1
+
+        def background_job_sync():
+            time.sleep(0.01)
+            self._counter['backround:things:on_post'] += 1000
+
+        resp.schedule(background_job_async)
+        resp.schedule(background_job_sync)
+        resp.schedule(background_job_async)
+        resp.schedule(background_job_sync)
+
+    async def on_put(self, req, resp):
+        # NOTE(kgriffs): Test that reading past the end does
+        # not hang.
+
+        chunks = []
+        for i in range(req.content_length + 1):
+            # NOTE(kgriffs): In the ASGI interface, bounded_stream is an
+            # alias for req.stream. We'll use the alias here just as
+            # a sanity check.
+            chunk = await req.bounded_stream.read(1)
+            chunks.append(chunk)
+
+        # NOTE(kgriffs): body should really be set to a string, but
+        # Falcon is lenient and will allow bytes as well (although
+        # it is slightly less performant).
+        # TODO(kgriffs): Perhaps in Falcon 4.0 be more strict? We would
+        # also have to change the WSGI behavior to match.
+        resp.body = b''.join(chunks)
+
+        # =================================================================
+        # NOTE(kgriffs): Test the sync_to_async helpers here to make sure
+        # they work as expected in the context of a real ASGI server.
+        # =================================================================
+        safely_tasks = []
+        safely_values = []
+
+        def callmesafely(a, b, c=None):
+            # NOTE(kgriffs): Sleep to prove that there isn't another instance
+            # running in parallel that is able to race ahead.
+            time.sleep(0.001)
+            safely_values.append((a, b, c))
+
+        cms = falcon.util.wrap_sync_to_async(callmesafely, threadsafe=False)
+        loop = falcon.util.get_loop()
+
+        # NOTE(kgriffs): Do not go much higher than 100 because daphne
+        # has a TBD bug with asyncio.gather() for large numbers (say,
+        # 1000), and will cause a stack overflow in a for loop that
+        # awaits each task in turn.
+        num_cms_tasks = 100
+
+        for i in range(num_cms_tasks):
+            # NOTE(kgriffs): create_task() is used here, so that the coroutines
+            # are scheduled immediately in the order created; under Python
+            # 3.6, asyncio.gather() does not seem to always schedule
+            # them in order, so we do it this way to make it predictable.
+            safely_tasks.append(loop.create_task(cms(i, i + 1, c=i + 2)))
+
+        await asyncio.gather(*safely_tasks)
+
+        assert len(safely_values) == num_cms_tasks
+        for i, val in enumerate(safely_values):
+            assert safely_values[i] == (i, i + 1, i + 2)
+
+        def callmeshirley(a=42, b=None):
+            return (a, b)
+
+        assert (42, None) == await falcon.util.sync_to_async(callmeshirley)
+        assert (1, 2) == await falcon.util.sync_to_async(callmeshirley, 1, 2)
+        assert (5, None) == await falcon.util.sync_to_async(callmeshirley, 5)
+        assert (3, 4) == await falcon.util.sync_to_async(callmeshirley, 3, b=4)
+
+
+class Bucket:
+    async def on_post(self, req, resp):
+        resp.body = await req.stream.read()
+
+
+class Events:
+    async def on_get(self, req, resp):
+        async def emit():
+            start = time.time()
+            while time.time() - start < 1:
+                yield falcon.asgi.SSEvent(text='hello world')
+                await asyncio.sleep(0.2)
+
+        resp.sse = emit()
+
+
+class LifespanHandler:
+    def __init__(self):
+        self.startup_succeeded = False
+        self.shutdown_succeeded = False
+
+    async def process_startup(self, scope, event):
+        assert scope['type'] == 'lifespan'
+        assert event['type'] == 'lifespan.startup'
+        self.startup_succeeded = True
+
+    async def process_shutdown(self, scope, event):
+        assert scope['type'] == 'lifespan'
+        assert event['type'] == 'lifespan.shutdown'
+        self.shutdown_succeeded = True
+
+
+def create_app():
+    app = falcon.asgi.App()
+    app.add_route('/', Things())
+    app.add_route('/bucket', Bucket())
+    app.add_route('/events', Events())
+
+    lifespan_handler = LifespanHandler()
+    app.add_middleware(lifespan_handler)
+
+    return app
+
+
+application = create_app()
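
The on_put responder above exercises Falcon's sync-to-async helpers under a real ASGI server. For reference, a minimal standalone sketch of the same helpers run directly under asyncio, outside any server; the names work(), values, and main() are illustrative only:

    import asyncio
    import time

    import falcon.util

    values = []

    def work(a, b, c=None):
        # Blocking callable, like callmesafely() above.
        time.sleep(0.001)
        values.append((a, b, c))

    # threadsafe=False tells Falcon the callable is not thread-safe, so the
    # calls are serialized rather than run on multiple threads at once (see
    # the NOTE above about proving that no other instance races ahead).
    wrapped = falcon.util.wrap_sync_to_async(work, threadsafe=False)

    async def main():
        await asyncio.gather(*(wrapped(i, i + 1, c=i + 2) for i in range(10)))
        assert sorted(values) == [(i, i + 1, i + 2) for i in range(10)]

        # One-shot form, as used with callmeshirley() above.
        assert await falcon.util.sync_to_async(lambda a, b=None: (a, b), 3, b=4) == (3, 4)

    asyncio.run(main())
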
diff --git a/tests/asgi/test_asgi_servers.py b/tests/asgi/test_asgi_servers.py
index 75621d394..93234f769 100644
--- a/tests/asgi/test_asgi_servers.py
+++ b/tests/asgi/test_asgi_servers.py
@@ -17,6 +17,8 @@
 import falcon.util

+_MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
+
 _PYPY = platform.python_implementation() == 'PyPy'

 _SERVER_HOST = '127.0.0.1'
@@ -24,9 +26,6 @@
 _random = random.Random()

-_MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
-_MODULE_NAME, __ = os.path.splitext(os.path.basename(__file__))
-

 class TestASGIServer:

@@ -37,7 +36,7 @@ def test_get(self, server_base_url):

     def test_put(self, server_base_url):
         body = '{}'
-        resp = requests.put(server_base_url, data=body)
+        resp = requests.put(server_base_url, data=body, timeout=5)
         assert resp.status_code == 200
         assert resp.text == '{}'

@@ -97,133 +96,23 @@ def test_sse(self, server_base_url):
         assert not events[-1]


-class Things:
-    def __init__(self):
-        self._counter = Counter()
-
-    async def on_get(self, req, resp):
-        await asyncio.sleep(0.01)
-        resp.body = req.remote_addr
-
-    async def on_post(self, req, resp):
-        resp.data = await req.stream.read(req.content_length or 0)
-        resp.set_header('X-Counter', str(self._counter['backround:things:on_post']))
-
-        async def background_job_async():
-            await asyncio.sleep(0.01)
-            self._counter['backround:things:on_post'] += 1
-
-        def background_job_sync():
-            time.sleep(0.01)
-            self._counter['backround:things:on_post'] += 1000
-
-        resp.schedule(background_job_async)
-        resp.schedule(background_job_sync)
-        resp.schedule(background_job_async)
-        resp.schedule(background_job_sync)
-
-    async def on_put(self, req, resp):
-        # NOTE(kgriffs): Test that reading past the end does
-        # not hang.
-
-        chunks = []
-        for i in range(req.content_length + 1):
-            # NOTE(kgriffs): In the ASGI interface, bounded_stream is an
-            # alias for req.stream. We'll use the alias here just as
-            # a sanity check.
-            chunk = await req.bounded_stream.read(1)
-            chunks.append(chunk)
-
-        # NOTE(kgriffs): body should really be set to a string, but
-        # Falcon is lenient and will allow bytes as well (although
-        # it is slightly less performant).
-        # TODO(kgriffs): Perhaps in Falcon 4.0 be more strict? We would
-        # also have to change the WSGI behavior to match.
-        resp.body = b''.join(chunks)
-
-        # =================================================================
-        # NOTE(kgriffs): Test the sync_to_async helpers here to make sure
-        # they work as expected in the context of a real ASGI server.
-        # =================================================================
-        safely_coroutine_objects = []
-        safely_values = []
-
-        def callmesafely(a, b, c=None):
-            # NOTE(kgriffs): Sleep to prove that there isn't another instance
-            # running in parallel that is able to race ahead.
-            time.sleep(0.0001)
-            safely_values.append((a, b, c))
-
-        cms = falcon.util.wrap_sync_to_async(callmesafely, threadsafe=False)
-        loop = falcon.util.get_loop()
-        for i in range(1000):
-            # NOTE(kgriffs): create_task() is used here, so that the coroutines
-            # are scheduled immediately in the order created; under Python
-            # 3.6, asyncio.gather() does not seem to always schedule
-            # them in order, so we do it this way to make it predictable.
-            safely_coroutine_objects.append(loop.create_task(cms(i, i + 1, c=i + 2)))
-
-        await asyncio.gather(*safely_coroutine_objects)
-
-        assert len(safely_values) == 1000
-        for i, val in enumerate(safely_values):
-            assert safely_values[i] == (i, i + 1, i + 2)
-
-        def callmeshirley(a=42, b=None):
-            return (a, b)
-
-        assert (42, None) == await falcon.util.sync_to_async(callmeshirley)
-        assert (1, 2) == await falcon.util.sync_to_async(callmeshirley, 1, 2)
-        assert (5, None) == await falcon.util.sync_to_async(callmeshirley, 5)
-        assert (3, 4) == await falcon.util.sync_to_async(callmeshirley, 3, b=4)
-
-
-class Bucket:
-    async def on_post(self, req, resp):
-        resp.body = await req.stream.read()
-
-
-class Events:
-    async def on_get(self, req, resp):
-        async def emit():
-            start = time.time()
-            while time.time() - start < 1:
-                yield falcon.asgi.SSEvent(text='hello world')
-                await asyncio.sleep(0.2)
-
-        resp.sse = emit()
-
-
-class LifespanHandler:
-    def __init__(self):
-        self.startup_succeeded = False
-        self.shutdown_succeeded = False
-
-    async def process_startup(self, scope, event):
-        assert scope['type'] == 'lifespan'
-        assert event['type'] == 'lifespan.startup'
-        self.startup_succeeded = True
-
-    async def process_shutdown(self, scope, event):
-        assert scope['type'] == 'lifespan'
-        assert event['type'] == 'lifespan.shutdown'
-        self.shutdown_succeeded = True
-
-
 @contextmanager
 def _run_server_isolated(process_factory, host, port):
     # NOTE(kgriffs): We have to use subprocess because uvicorn has a tendency
     # to corrupt our asyncio state and cause intermittent hangs in the test
     # suite.
+    print('\n[Starting server process...]')
     server = process_factory(host, port)

     time.sleep(0.2)

     server.poll()
     startup_succeeded = (server.returncode is None)
+    print('\n[Server process start {}]'.format('succeeded' if startup_succeeded else 'failed'))

     if startup_succeeded:
         yield server

+    print('\n[Sending SIGTERM to server process...]')
     server.terminate()

     try:
@@ -232,7 +121,7 @@ def _run_server_isolated(process_factory, host, port):
         server.kill()

     stdout_data, __ = server.communicate()
-    print(stdout_data.decode())
+    print('\n' + stdout_data.decode())

     assert server.returncode == 0
     assert startup_succeeded
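
The instrumented _run_server_isolated() above is the piece being debugged: the server runs in a subprocess, is polled once after a short delay, and is then terminated with a timeout. A stripped-down sketch of the same start/poll/terminate sequence, handy for reproducing a hang outside of pytest; the port is illustrative, and it assumes uvicorn is installed and the script is run from tests/asgi so that _asgi_test_app is importable (the same command _uvicorn_factory() below builds):

    import subprocess
    import time

    server = subprocess.Popen(
        ('uvicorn', '--host', '127.0.0.1', '--port', '8000',
         '_asgi_test_app:application'),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )

    time.sleep(0.2)
    server.poll()
    print('started OK' if server.returncode is None else 'failed to start')

    time.sleep(2)  # ...exercise the server here...

    server.terminate()  # SIGTERM, as in the fixture above
    try:
        stdout_data, __ = server.communicate(timeout=10)
    except subprocess.TimeoutExpired:
        server.kill()
        stdout_data, __ = server.communicate()

    print(stdout_data.decode())
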
@@ -246,7 +135,7 @@ def _uvicorn_factory(host, port):
         '--host', host,
         '--port', str(port),

-        _MODULE_NAME + ':application'
+        '_asgi_test_app:application'
     )

     return subprocess.Popen(
@@ -268,7 +157,7 @@ def _daphne_factory(host, port):
             '--verbosity', '2',
             '--access-log', '-',

-            _MODULE_NAME + ':application'
+            '_asgi_test_app:application'
         ),
         stdout=subprocess.PIPE,
         stderr=subprocess.STDOUT,
@@ -303,18 +192,3 @@ def server_base_url(request):
             break

     yield base_url
-
-
-def create_app():
-    app = falcon.asgi.App()
-    app.add_route('/', Things())
-    app.add_route('/bucket', Bucket())
-    app.add_route('/events', Events())
-
-    lifespan_handler = LifespanHandler()
-    app.add_middleware(lifespan_handler)
-
-    return app
-
-
-application = create_app()
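
With a server started as above (or via the test fixtures), the narrowed run can also be sanity-checked by hand. A short sketch mirroring test_put(), using the same explicit timeout this patch adds so a hung server fails fast instead of blocking; the base URL is illustrative:

    import requests

    base_url = 'http://127.0.0.1:8000'

    # Things.on_put echoes the request body back, so PUT '{}' returns '{}'.
    resp = requests.put(base_url + '/', data='{}', timeout=5)
    assert resp.status_code == 200
    assert resp.text == '{}'
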